BIND 10 trac1390, updated. ab3f90da16d31fc6833d869686e07729d9b8c135 [1390] Merge branch 'master' into trac1390 with fixing conflicts: src/bin/xfrout/tests/xfrout_test.py.in src/bin/xfrout/xfrout.py.in src/bin/xfrout/xfrout_messages.mes

BIND 10 source code commits bind10-changes at lists.isc.org
Tue Nov 22 02:40:22 UTC 2011


The branch, trac1390 has been updated
       via  ab3f90da16d31fc6833d869686e07729d9b8c135 (commit)
       via  710e8207090f894b14eaa9834a9c6cd551ea950d (commit)
       via  80c131f5b0763753d199b0fb9b51f10990bcd92b (commit)
       via  a01eb512f67a14855fc9be9fff561c3c86634e0b (commit)
       via  635662711c673bbcfc8fac95c96cfdc33702ca94 (commit)
       via  15e23bca2cf7f266d32c6bb30a142a80ee543227 (commit)
       via  ec1cc2b4be6e19519644534889865a3ee2c81a8a (commit)
       via  277b80e0671586d8ace205cb53465b1f6f414466 (commit)
       via  a435f3ac50667bcb76dca44b7b5d152f45432b57 (commit)
       via  6dd270220a4bac70fa4cd6a898e331b658fe0af2 (commit)
       via  1bb5168b7014a83690d1bb363dbcc0fa6d8fd7f1 (commit)
       via  ddb6d109c0947f203eaa6265a22d2fb3b166db0b (commit)
       via  2eb9f486619e27aee0684f840c85d152b3ddfe0f (commit)
       via  71378c1048bb610c748788dabfd04e421f6b4ac0 (commit)
       via  de43982b90d0fafd6b4e1857e366a6cd983cfab7 (commit)
       via  77d69c99f2b3cc8ee627d8f73174ead9f03da412 (commit)
       via  13f108b25fbccc56b731bd5bcc505cdf48e91e91 (commit)
       via  4d3aef6a83965e26781d6b74f0ff913926845c7c (commit)
       via  fb33c8d379f9e75b82edafff45d4dc13fda62630 (commit)
       via  4f02b45248227dd98904b61bbcd2e6cff36b5fd6 (commit)
       via  54d9d7c1597df3bcdf47d07db040f63f7008c6a7 (commit)
       via  48c07943ac1dd24922f46cf970c214b5cf24813f (commit)
       via  bea7b0e3fde35a335bb9e6cf170b0fc240650275 (commit)
       via  9b1c64b7d164b6b27d126e55391b2bbafeaf8c00 (commit)
       via  96bf3ab5271347542e13b52e2c37b9c8810a6fad (commit)
       via  c59bb2dcd90a5d580a7f3c9e42a54a080f763add (commit)
       via  319bc2d65301606aa938363dcb30a8519755886e (commit)
       via  d953caeeaf821743ed27ef4a47a45bef66615dc9 (commit)
       via  5d382b4295b8455fae844a5ca94886788f6cb19b (commit)
       via  d08c42ad20f2c91bf64ef47ed893fa2aac4ff037 (commit)
       via  08915b387e64f3cf9d9a86a5a21c4492db3a488c (commit)
       via  1d4541dfd067cd2f0c9e155049c2b7f9d70fa896 (commit)
       via  ecf6a71b5845c6710119dd97b500c7edeb3f44c2 (commit)
       via  a24c6579ab039afd67ecb50a71b9fc8eabf9b6c7 (commit)
       via  3647e8ff9c194c1c0a576558f4f49ba4ff2614e7 (commit)
       via  c3d71baca757b39e13968369e0afb39dd4472eb8 (commit)
       via  a9040d4aba8e3c01a77236c81f07e2b06b300918 (commit)
       via  35556de064c193779c3cd5e5b0fde583f4a8d598 (commit)
       via  c4f22c20ee19e1ffba43914671c059a434f4518c (commit)
       via  12b72af07f5e06cf172b115b0acba3fbe3554467 (commit)
       via  ecd9c5fc4b3cf747e2b5a221504feac3adeb236e (commit)
       via  fc0a31681f7a8e4198068be0038eb9a4f8a74ec7 (commit)
       via  d3db538710b6547cc2e04127fb5fc9d2d5a181f9 (commit)
       via  2ab2fd55d4a12d1469060a3657893121114e2e2f (commit)
       via  2dd7ee33a13a07a00e22fbc81ecb8b19b57efa8f (commit)
       via  5cea4cfbee9770f4299f5a701af89f7cbf977ef4 (commit)
       via  1af57091dc0c38cff538de2470275f25caeb2eab (commit)
       via  256c0a08483ac2bf396dfa8424b4c02f0681a0f4 (commit)
       via  8f74718cc2012ca68a44d9ed9996f479c6834101 (commit)
       via  5c92f567d93977bd56a7ed2898c7bee098b552ab (commit)
       via  956a0a589db0a8250ec94ece377657783ac15caf (commit)
       via  39def1d39c9543fc485eceaa5d390062edb97676 (commit)
       via  bcb432839cacdf10172d49dec94292871aee3526 (commit)
       via  164d651a0e4c1059c71f56b52ea87ac72b7f6c77 (commit)
       via  09f6d6281a4203a91dcfb6c56e240c06f11935b6 (commit)
       via  76fb414ea5257b639ba58ee336fae9a68998b30d (commit)
       via  e5f37058b67c641b8eb024bd48ca269ae9e41163 (commit)
       via  934a07b6d0ebec8bab258398894905de32878a8b (commit)
       via  40f6dd2b378f31f4ec561eeeac534874a02a8ae8 (commit)
       via  84fa061af28d72e51939039bfcbb04e1febc3cb1 (commit)
       via  b54f1b460285db4d6ae89dd716098a88363b1511 (commit)
       via  c1138d13b2692fa3a4f2ae1454052c866d24e654 (commit)
       via  4df29b3303dbce85b8143d8d74935b3c9283fb31 (commit)
       via  ed91f985331705fc345bec838697c9bda4b6b7e4 (commit)
       via  1219d81b49e51adece77dc57b5902fa1c6be1407 (commit)
       via  8380ccceca1b8412fbc6742cb37dbd7de843ac50 (commit)
       via  38d84c59fbc097e57d03ac10d6a83edc63c4cffa (commit)
       via  c0cc183880fc5e1949bcc97585c20ac2ab21e281 (commit)
       via  2d85e22f10321fbc5b9cd12f70e90907cb01830f (commit)
       via  631c5c2d24ba8be2b12930cc8267b2298414d563 (commit)
       via  ddf219d781a40764999bd8b19c80f607c2783b57 (commit)
       via  4a68215905542025570f06fcc703fa44d6b37cfd (commit)
       via  315f4999df039dbb2baa77ee12afa0dfbe01dc25 (commit)
       via  7344d2788cd06e54ca7ca3e3a3f69010dac80670 (commit)
       via  ce546dddcbbf7efc4778c1d0d4210ca139ed5bf9 (commit)
       via  fa89a0798d166574e089b38d7bd43a701eda5467 (commit)
       via  12b1a920f219e627bb5860f0a0217cc5c86749e5 (commit)
       via  cd342dae58399be6cdfad55a466a76ee385ccb08 (commit)
       via  f9e81512329b71d6b5d94bafa789c63e763b2a72 (commit)
       via  226dc3ee718e1320079d6c6d8642e0f0dda1bdef (commit)
       via  962a91763b9ef79e887e52e22fa23462ff7d680e (commit)
       via  170936d47b2e9ad3d5c3ceabf86026fca9795150 (commit)
       via  dbf32272f3b76b90678add39038fb6978c03ab3e (commit)
       via  295732d42d2b0a9641edfa352087033d8eff2794 (commit)
       via  9f89f07adcc9ccdde454016f037076e04eb791c1 (commit)
       via  fdefb47da0a5d7203496738ba03d4e1737e8149e (commit)
       via  93a5d45d9c1aa90249494608b8c2829059cc3b28 (commit)
       via  c1f5fb059e9c272dedc27a3f14fa8ed2fec71b95 (commit)
       via  fd1ae8e05771b151877ae3c082a7b3e3b32a20c7 (commit)
       via  21887dffc4cd692ce23bfff1685fba0e2c1e55b0 (commit)
       via  466a968426ed9062d86239560492edf7dc72ee02 (commit)
       via  a59f28758abdb92721e010956bd421148643377b (commit)
       via  e09910d37b783b182ae2dc83f6cb272bff68cbb6 (commit)
       via  648a187c5d7181019dc19531a1057bc3e6f70e96 (commit)
       via  16b7feca0339f67acae30eb67d913bd3ef0298be (commit)
       via  ff5154291678973eaa0483518302b74a62f0acba (commit)
       via  c4c93896137dd936066cd1a714569468bf248451 (commit)
       via  9bab697bc984a6565a6f0dfe8a981f4809edc91c (commit)
       via  ab406229e29b7cfc470142ee0166086bf70790a3 (commit)
       via  e24f557e8208f43a8ade0855395c87b175bc351c (commit)
       via  3f93372ba9416c9d759ea0c6d8981837c036448e (commit)
       via  48ee64bfbde99ce88eb305d2a751283b42c826ad (commit)
       via  cfecb1acb98f45a12864b7730ea58afbeb674c7b (commit)
       via  9ab6902f20b57452eaecf8f737d37f8dedcd623a (commit)
       via  8c57956e16dd09b528cd11dbf4c2fa51e48da359 (commit)
       via  e84f2aa5e9e493aa7dadfbd3b31753b5837d9069 (commit)
       via  dabf62d5444fe3a1e55e72aa393e0dddf188df7b (commit)
       via  ca3d2d1badee8e5e6d3c1f73fb29afdcc7692fa6 (commit)
       via  94ec743d73153258d8a231e2e5126749ea00e3c8 (commit)
       via  dca136175cf0dde67a63f40953187ca60f90caad (commit)
       via  625aea76304c024102cb5065f910e5121b1641f7 (commit)
       via  a4c51111cc0fc28c6517a11f8ae88682ab8e6996 (commit)
       via  0878c77ba4bcbaeb509f2bb7c2d52ee62864dadc (commit)
       via  efeb506e624945c6f21755621897a088715045b7 (commit)
       via  fda514f6b5ff65648709273dc62f960d85f4e066 (commit)
       via  2afbc7d3564b16d49043d48fe5ed9dd343311861 (commit)
       via  ce28b51d36567d63b5258648f7fbe406baaa5677 (commit)
       via  9753568c850855beecaabf500aea33483369d64f (commit)
       via  0ad9b8c8482a134af7c47b64b412f642d08ce642 (commit)
       via  3a330862f1357a4e0edd570de5896785029f4530 (commit)
       via  d85912df5ef89ff95c3653403503f61d120a0761 (commit)
       via  06d6be693064252ed2535fc8685ca4e7b8db0989 (commit)
       via  cb1c34cd2ffb876819441b4869a66a4cb500a8ba (commit)
       via  a3a4e317a91c075f0d16de7d16cc652e508df101 (commit)
       via  96086ea69576acae7d59e1d7665f622bd526c7c1 (commit)
       via  7c229ebaca82e06899126f9b364fe524ec6d4b56 (commit)
       via  599ec7f889bba386c838ec85735b203514905d9d (commit)
      from  3ff33cfedcca0cd1acb80a5cf2651f89403a82a9 (commit)

Those revisions listed above that are new to this repository have
not appeared on any other notification email; so we list those
revisions in full, below.

- Log -----------------------------------------------------------------
commit ab3f90da16d31fc6833d869686e07729d9b8c135
Merge: 3ff33cf 710e820
Author: JINMEI Tatuya <jinmei at isc.org>
Date:   Mon Nov 21 18:34:10 2011 -0800

    [1390] Merge branch 'master' into trac1390 with fixing conflicts:
    	src/bin/xfrout/tests/xfrout_test.py.in
    	src/bin/xfrout/xfrout.py.in
    	src/bin/xfrout/xfrout_messages.mes

-----------------------------------------------------------------------

Summary of changes:
 ChangeLog                                          |   65 +++-
 Makefile.am                                        |    2 +-
 compatcheck/Makefile.am                            |    8 +
 compatcheck/README                                 |    5 +
 compatcheck/sqlite3-difftbl-check.py.in            |   60 +++
 configure.ac                                       |    3 +
 doc/guide/bind10-guide.xml                         |   66 +++-
 src/bin/bind10/bind10_src.py.in                    |  109 +----
 src/bin/bind10/tests/bind10_test.py.in             |   11 -
 src/bin/xfrin/tests/testdata/example.com.sqlite3   |  Bin 11264 -> 12288 bytes
 src/bin/xfrin/tests/xfrin_test.py                  |   22 +-
 src/bin/xfrin/xfrin.py.in                          |    5 +-
 src/bin/xfrout/b10-xfrout.xml                      |   25 ++
 src/bin/xfrout/tests/Makefile.am                   |    4 +-
 src/bin/xfrout/tests/testdata/creatediff.py        |   58 +++
 src/bin/xfrout/tests/testdata/example.com          |    2 +-
 src/bin/xfrout/tests/testdata/test.sqlite3         |  Bin 11264 -> 12288 bytes
 src/bin/xfrout/tests/xfrout_test.py.in             |  456 ++++++++++++++------
 src/bin/xfrout/xfrout.py.in                        |  276 ++++++++----
 src/bin/xfrout/xfrout_messages.mes                 |   56 ++-
 src/lib/datasrc/client.h                           |   51 +++
 src/lib/datasrc/database.cc                        |  101 +++++
 src/lib/datasrc/database.h                         |   17 +-
 src/lib/datasrc/datasrc_messages.mes               |   28 ++
 src/lib/datasrc/memory_datasrc.cc                  |    7 +
 src/lib/datasrc/memory_datasrc.h                   |    4 +
 src/lib/datasrc/sqlite3_accessor.h                 |    1 -
 src/lib/datasrc/tests/client_unittest.cc           |    7 +
 src/lib/datasrc/tests/database_unittest.cc         |  377 ++++++++++++++--
 src/lib/datasrc/tests/testdata/Makefile.am         |    5 -
 src/lib/datasrc/zone.h                             |   92 ++++
 src/lib/exceptions/exceptions.h                    |   11 +
 src/lib/python/isc/bind10/component.py             |   56 +++-
 src/lib/python/isc/bind10/tests/component_test.py  |  119 +++++-
 src/lib/python/isc/datasrc/Makefile.am             |    2 +
 src/lib/python/isc/datasrc/client_inc.cc           |   56 +++
 src/lib/python/isc/datasrc/client_python.cc        |   58 +++-
 src/lib/python/isc/datasrc/datasrc.cc              |   41 ++
 src/lib/python/isc/datasrc/journal_reader_inc.cc   |   80 ++++
 ...iterator_python.cc => journal_reader_python.cc} |  120 ++----
 .../{iterator_python.h => journal_reader_python.h} |   27 +-
 src/lib/python/isc/datasrc/sqlite3_ds.py           |    8 +
 src/lib/python/isc/datasrc/tests/Makefile.am       |    1 +
 src/lib/python/isc/datasrc/tests/datasrc_test.py   |  168 ++++++--
 .../datasrc/tests/testdata/test.sqlite3.nodiffs    |  Bin 43008 -> 43008 bytes
 src/lib/python/isc/log/log.cc                      |  188 ++++----
 src/lib/python/isc/log/tests/log_test.py           |   31 ++
 src/lib/python/isc/testutils/Makefile.am           |    2 +-
 src/lib/python/isc/testutils/rrset_utils.py        |   63 +++
 src/lib/python/isc/xfrin/diff.py                   |   18 +-
 src/lib/python/isc/xfrin/libxfrin_messages.mes     |   10 +
 src/lib/python/isc/xfrin/tests/diff_tests.py       |   26 +-
 52 files changed, 2336 insertions(+), 672 deletions(-)
 create mode 100644 compatcheck/Makefile.am
 create mode 100644 compatcheck/README
 create mode 100755 compatcheck/sqlite3-difftbl-check.py.in
 create mode 100755 src/bin/xfrout/tests/testdata/creatediff.py
 create mode 100644 src/lib/python/isc/datasrc/journal_reader_inc.cc
 copy src/lib/python/isc/datasrc/{iterator_python.cc => journal_reader_python.cc} (61%)
 copy src/lib/python/isc/datasrc/{iterator_python.h => journal_reader_python.h} (61%)
 copy src/lib/{ => python/isc}/datasrc/tests/testdata/test.sqlite3.nodiffs (100%)
 create mode 100644 src/lib/python/isc/testutils/rrset_utils.py

-----------------------------------------------------------------------
diff --git a/ChangeLog b/ChangeLog
index c04e845..f7bfb33 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,7 +1,62 @@
+327.	[func]		jinmei
+	b10-xfrout now supports IXFR.  (Right now there is no
+	user-configurable parameter for this feature; b10-xfrout will
+	always respond to IXFR requests according to RFC1995.)
+	Note also that Trac #1390 is necessary for outbound IXFR to work
+	in practice, so as of this writing this is not a user-visible
+	feature.
+	(Trac #1371 and #1372, git 80c131f5b0763753d199b0fb9b51f10990bcd92b)
+
+326.	[build]*	jinmei
+	Added a check script for the SQLite3 schema version.  It will be
+	run at the beginning of 'make install', and if it detects an old
+	schema version, installation will stop.  You'll then need to
+	upgrade the database file as instructed by the error message.
+	(Trac #1404, git a435f3ac50667bcb76dca44b7b5d152f45432b57)
+
+325.	[func]		jinmei
+	Python isc.datasrc: added interfaces for difference management:
+	DataSourceClient.get_updater() now has the 'journaling' parameter
+	to enable storing diffs in the data source, and a new class
+	ZoneJournalReader was introduced to retrieve them, which can be
+	created by the new DataSourceClient.get_journal_reader() method.
+	(Trac #1333, git 3e19362bc1ba7dc67a87768e2b172c48b32417f5,
+	git 39def1d39c9543fc485eceaa5d390062edb97676)
+
+324.	[bug]		jinmei
+	Fixed a reference leak in the isc.log Python module.  Most
+	BIND 10 Python programs had a (possibly slow) memory leak
+	due to this bug.
+	(Trac #1359, git 164d651a0e4c1059c71f56b52ea87ac72b7f6c77)
+
+323.	[bug]		jinmei
+	b10-xfrout incorrectly skipped adding TSIG RRs to some
+	intermediate responses (when TSIG is to be used for the
+	responses).  While RFC2845 optionally allows skipping intermediate
+	TSIGs (as long as the digest for the skipped part was included
+	in a later TSIG), the underlying TSIG API doesn't support this
+	mode of signing.
+	(Trac #1370, git 76fb414ea5257b639ba58ee336fae9a68998b30d)
+
+322.	[func]		jinmei
+	datasrc: Added a C++ API for retrieving the difference between
+	two versions of a zone.  A new ZoneJournalReader class was
+	introduced for this purpose, and a corresponding factory method
+	was added to DataSourceClient.
+	(Trac #1332, git c1138d13b2692fa3a4f2ae1454052c866d24e654)
+
+321.	[func]*		jinmei
+	b10-xfrin now installs IXFR differences into the underlying data
+	source (if it supports journaling) so that the stored differences
+	can be used for subsequent IXFR-out transactions.
+	Note: this is a backward-incompatible change for older sqlite3
+	database files.  They need to be upgraded to have a "diffs" table.
+	(Trac #1376, git 1219d81b49e51adece77dc57b5902fa1c6be1407)
+
 320.	[func]*		vorner
-	The --brittle switch was removed from the bind10 executable. It didn't
-	work after the #213 change and the same effect can be accomplished by
-	declaring all components as core.
+	The --brittle switch was removed from the bind10 executable.
+	It didn't work after change #316 (Trac #213) and the same
+	effect can be accomplished by declaring all components as core.
 	(Trac #1340, git f9224368908dd7ba16875b0d36329cf1161193f0)
 
 319.	[func]		naokikambe
@@ -15,12 +70,12 @@
 	only for the XML documents but also is for the XSD and XSL documents.
 	(Trac #917, git b34bf286c064d44746ec0b79e38a6177d01e6956)
 
-318.    [func]      stephen
+318.    [func]		stephen
 	Add C++ API for accessing zone difference information in database-based
 	data sources.
 	(Trac #1330, git 78770f52c7f1e7268d99e8bfa8c61e889813bb33)
 
-317.    [func]      vorner
+317.    [func]		vorner
 	datasrc: the getUpdater method of DataSourceClient supports an optional
 	'journaling' parameter to indicate the generated updater to store diffs.
 	The database based derived class implements this extension.
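
For reference, here is a minimal Python sketch of the journaling interfaces
described in entries 322 and 325 above.  The call signatures follow the uses
that appear later in this changeset (creatediff.py and the xfrout test mocks);
treating the journal reader as an iterable over RRsets is an assumption, not
something shown in this diff, and the database path and serial numbers are
only illustrative.

import isc.datasrc
import isc.log
from isc.dns import Name

isc.log.init("example")        # the datasrc module expects logging to be set up

client = isc.datasrc.DataSourceClient(
    'sqlite3', '{ "database_file": "zone.sqlite3" }')

# get_updater() now takes a 'journaling' argument; when it is True the
# changes applied through the updater are also recorded as diffs.
updater = client.get_updater(Name('example.com'), False, True)
# ... delete_rrset()/add_rrset() calls for one SOA-to-SOA sequence,
# followed by updater.commit(), would go here ...

# get_journal_reader() retrieves the stored diffs between two serials and
# returns a (result code, reader) pair.
code, reader = client.get_journal_reader(Name('example.com'),
                                         2011111802, 2011112001)
if code == isc.datasrc.ZoneJournalReader.SUCCESS:
    for rrset in reader:       # assumed: the reader iterates over RRsets
        print(rrset.to_text())
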
diff --git a/Makefile.am b/Makefile.am
index 50aa6b9..cc91a56 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -1,4 +1,4 @@
-SUBDIRS = doc src tests
+SUBDIRS = compatcheck doc src tests
 USE_LCOV=@USE_LCOV@
 LCOV=@LCOV@
 GENHTML=@GENHTML@
diff --git a/compatcheck/Makefile.am b/compatcheck/Makefile.am
new file mode 100644
index 0000000..029578d
--- /dev/null
+++ b/compatcheck/Makefile.am
@@ -0,0 +1,8 @@
+noinst_SCRIPTS = sqlite3-difftbl-check.py
+
+# We're going to abuse install-data-local for a pre-install check.
+# This is to be considered a short-term hack and is expected to be removed
+# in a future version.
+install-data-local:
+	$(PYTHON) sqlite3-difftbl-check.py \
+	$(localstatedir)/$(PACKAGE)/zone.sqlite3
diff --git a/compatcheck/README b/compatcheck/README
new file mode 100644
index 0000000..8381e60
--- /dev/null
+++ b/compatcheck/README
@@ -0,0 +1,5 @@
+This directory is a collection of compatibility checker programs.
+They will be run before any other installation attempts on 'make install'
+to see if the installation causes any substantial compatibility problems
+with existing configurations.  If any checker program finds an issue,
+'make install' will stop at that point.
diff --git a/compatcheck/sqlite3-difftbl-check.py.in b/compatcheck/sqlite3-difftbl-check.py.in
new file mode 100755
index 0000000..e3b7b91
--- /dev/null
+++ b/compatcheck/sqlite3-difftbl-check.py.in
@@ -0,0 +1,60 @@
+#!@PYTHON@
+
+# Copyright (C) 2011  Internet Systems Consortium.
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import os, sqlite3, sys
+from optparse import OptionParser
+
+usage = 'usage: %prog [options] db_file'
+parser = OptionParser(usage=usage)
+parser.add_option("-u", "--upgrade", action="store_true",
+                  dest="upgrade", default=False,
+                  help="Upgrade the database file [default: %default]")
+(options, args) = parser.parse_args()
+if len(args) == 0:
+    parser.error('missing argument')
+
+db_file = args[0]
+
+# If the file doesn't exist, there's nothing to do
+if not os.path.exists(db_file):
+    sys.exit(0)
+
+conn = sqlite3.connect(db_file)
+cur = conn.cursor()
+try:
+    # This can be anything that works iff the "diffs" table exists
+    cur.execute('SELECT name FROM diffs DESC LIMIT 1')
+except sqlite3.OperationalError as ex:
+    # If it fails with 'no such table', create a new one or fail with
+    # warning depending on the --upgrade command line option.
+    if str(ex) == 'no such table: diffs':
+        if options.upgrade:
+            cur.execute('CREATE TABLE diffs (id INTEGER PRIMARY KEY, ' +
+                        'zone_id INTEGER NOT NULL, ' +
+                        'version INTEGER NOT NULL, ' +
+                        'operation INTEGER NOT NULL, ' +
+                        'name STRING NOT NULL COLLATE NOCASE, ' +
+                        'rrtype STRING NOT NULL COLLATE NOCASE, ' +
+                        'ttl INTEGER NOT NULL, rdata STRING NOT NULL)')
+        else:
+            sys.stdout.write('Found an older version of SQLite3 DB file: ' +
+                             db_file + '\n' + "Perform '" + os.getcwd() +
+                             "/sqlite3-difftbl-check.py --upgrade " +
+                             db_file + "'\n" +
+                             'before continuing install.\n')
+            sys.exit(1)
+conn.close()
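
As a side note, once the "diffs" table exists it can be inspected directly
with the standard sqlite3 module.  A minimal sketch follows; the database
path is an assumption, and the column names come from the CREATE TABLE
statement above and the query used in xfrin_test.py later in this diff.

import sqlite3

conn = sqlite3.connect('zone.sqlite3')
cur = conn.cursor()
# Each stored difference is one row; reading the rows back ordered by id
# reproduces the recorded change sequence.
cur.execute('SELECT name, rrtype, ttl, rdata FROM diffs ORDER BY id')
for name, rrtype, ttl, rdata in cur.fetchall():
    print(name, rrtype, ttl, rdata)
conn.close()
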
diff --git a/configure.ac b/configure.ac
index 2692ddb..8302e1e 100644
--- a/configure.ac
+++ b/configure.ac
@@ -816,6 +816,7 @@ AM_CONDITIONAL(INSTALL_CONFIGURATIONS, test x$install_configurations = xyes || t
 AC_CONFIG_FILES([Makefile
                  doc/Makefile
                  doc/guide/Makefile
+                 compatcheck/Makefile
                  src/Makefile
                  src/bin/Makefile
                  src/bin/bind10/Makefile
@@ -937,6 +938,7 @@ AC_CONFIG_FILES([Makefile
                  tests/tools/badpacket/tests/Makefile
                ])
 AC_OUTPUT([doc/version.ent
+           compatcheck/sqlite3-difftbl-check.py
            src/bin/cfgmgr/b10-cfgmgr.py
            src/bin/cfgmgr/tests/b10-cfgmgr_test.py
            src/bin/cmdctl/cmdctl.py
@@ -1016,6 +1018,7 @@ AC_OUTPUT([doc/version.ent
            tests/system/ixfr/in-3/setup.sh
            tests/system/ixfr/in-4/setup.sh
           ], [
+           chmod +x compatcheck/sqlite3-difftbl-check.py
            chmod +x src/bin/cmdctl/run_b10-cmdctl.sh
            chmod +x src/bin/xfrin/run_b10-xfrin.sh
            chmod +x src/bin/xfrout/run_b10-xfrout.sh
diff --git a/doc/guide/bind10-guide.xml b/doc/guide/bind10-guide.xml
index 21bb671..711b144 100644
--- a/doc/guide/bind10-guide.xml
+++ b/doc/guide/bind10-guide.xml
@@ -1369,20 +1369,72 @@ what if a NOTIFY is sent?
       The <command>b10-xfrout</command> process is started by
       <command>bind10</command>.
       When the <command>b10-auth</command> authoritative DNS server
-      receives an AXFR request, <command>b10-xfrout</command>
-      sends the zone.
-      This is used to provide master DNS service to share zones
+      receives an AXFR or IXFR request, <command>b10-auth</command>
+      internally forwards the request to <command>b10-xfrout</command>,
+      which handles the rest of the request processing.
+      This is used to provide primary DNS service to share zones
       to secondary name servers.
       The <command>b10-xfrout</command> is also used to send
-      NOTIFY messages to slaves.
+      NOTIFY messages to secondary servers.
     </para>
 
+    <para>
+      A global or per zone <option>transfer_acl</option> configuration
+      can be used to control accessibility of the outbound zone
+      transfer service.
+      By default, <command>b10-xfrout</command> allows any client to
+      perform zone transfers for any zone:
+    </para>
+
+      <screen>> <userinput>config show Xfrout/transfer_acl</userinput>
+Xfrout/transfer_acl[0]	{"action": "ACCEPT"}	any	(default)</screen>
+
+    <para>
+      You can change this to, for example, reject all transfer
+      requests by default while allowing requests for the transfer
+      of zone "example.com" from 192.0.2.1 and 2001:db8::1 as follows:
+    </para>
+
+      <screen>> <userinput>config set Xfrout/transfer_acl[0] {"action": "REJECT"}</userinput>
+> <userinput>config add Xfrout/zone_config</userinput>
+> <userinput>config set Xfrout/zone_config[0]/origin "example.com"</userinput>
+> <userinput>config set Xfrout/zone_config[0]/transfer_acl [{"action": "ACCEPT", "from": "192.0.2.1"},</userinput>
+<userinput>                                                 {"action": "ACCEPT", "from": "2001:db8::1"}]</userinput>
+> <userinput>config commit</userinput></screen>
+
     <note><simpara>
-     The current development release of BIND 10 only supports
-     AXFR. (IXFR is not supported.)
-     Access control is not yet provided.
+	In the above example the lines
+	for <option>transfer_acl</option> were divided for
+	readability.  In the actual input they must be entered as a single line.
     </simpara></note>
 
+    <para>
+      If you want to require TSIG in access control, a separate TSIG
+      "key ring" must be configured specifically
+      for <command>b10-xfrout</command> as well as a system-wide
+      key ring, both containing a consistent set of keys.
+      For example, to change the previous example to allow requests
+      from 192.0.2.1 signed by a TSIG with a key name of
+      "key.example", you'll need to do this:
+    </para>
+
+    <screen>> <userinput>config set tsig_keys/keys ["key.example:<base64-key>"]</userinput>
+> <userinput>config set Xfrout/tsig_keys/keys ["key.example:<base64-key>"]</userinput>
+> <userinput>config set Xfrout/zone_config[0]/transfer_acl [{"action": "ACCEPT", "from": "192.0.2.1", "key": "key.example"}]</userinput>
+> <userinput>config commit</userinput></screen>
+
+    <para>
+      The first line of configuration defines a system-wide key ring.
+      This is necessary because the <command>b10-auth</command> server
+      also checks TSIGs and it uses the system-wide configuration.
+    </para>
+
+    <note><simpara>
+	In a future version, <command>b10-xfrout</command> will also
+	use the system-wide TSIG configuration.
+	The way to specify zone-specific configuration (ACLs, etc.) is
+	likely to be changed, too.
+    </simpara></note>
 
 <!--
 TODO:
diff --git a/src/bin/bind10/bind10_src.py.in b/src/bin/bind10/bind10_src.py.in
index 68ff97b..bf6079e 100755
--- a/src/bin/bind10/bind10_src.py.in
+++ b/src/bin/bind10/bind10_src.py.in
@@ -92,51 +92,6 @@ VERSION = "bind10 20110223 (BIND 10 @PACKAGE_VERSION@)"
 # This is for boot_time of Boss
 _BASETIME = time.gmtime()
 
-class RestartSchedule:
-    """
-Keeps state when restarting something (in this case, a process).
-
-When a process dies unexpectedly, we need to restart it. However, if 
-it fails to restart for some reason, then we should not simply keep
-restarting it at high speed.
-
-A more sophisticated algorithm can be developed, but for now we choose
-a simple set of rules:
-
-  * If a process was been running for >=10 seconds, we restart it
-    right away.
-  * If a process was running for <10 seconds, we wait until 10 seconds
-    after it was started.
-
-To avoid programs getting into lockstep, we use a normal distribution
-to avoid being restarted at exactly 10 seconds."""
-
-    def __init__(self, restart_frequency=10.0):
-        self.restart_frequency = restart_frequency
-        self.run_start_time = None
-        self.run_stop_time = None
-        self.restart_time = None
-    
-    def set_run_start_time(self, when=None):
-        if when is None:
-            when = time.time()
-        self.run_start_time = when
-        sigma = self.restart_frequency * 0.05
-        self.restart_time = when + random.normalvariate(self.restart_frequency, 
-                                                        sigma)
-
-    def set_run_stop_time(self, when=None):
-        """We don't actually do anything with stop time now, but it 
-        might be useful for future algorithms."""
-        if when is None:
-            when = time.time()
-        self.run_stop_time = when
-
-    def get_restart_time(self, when=None):
-        if when is None:
-            when = time.time()
-        return max(when, self.restart_time)
-
 class ProcessInfoError(Exception): pass
 
 class ProcessInfo:
@@ -151,7 +106,6 @@ class ProcessInfo:
         self.env = env
         self.dev_null_stdout = dev_null_stdout
         self.dev_null_stderr = dev_null_stderr
-        self.restart_schedule = RestartSchedule()
         self.uid = uid
         self.username = username
         self.process = None
@@ -200,7 +154,6 @@ class ProcessInfo:
                                         env=spawn_env,
                                         preexec_fn=self._preexec_work)
         self.pid = self.process.pid
-        self.restart_schedule.set_run_start_time()
 
     # spawn() and respawn() are the same for now, but in the future they
     # may have different functionality
@@ -239,13 +192,7 @@ class BoB:
         """
         self.cc_session = None
         self.ccs = None
-        self.cfg_start_auth = True
-        self.cfg_start_resolver = False
-        self.cfg_start_dhcp6 = False
-        self.cfg_start_dhcp4 = False
         self.curproc = None
-        # XXX: Not used now, waits for reintroduction of restarts.
-        self.dead_processes = {}
         self.msgq_socket_file = msgq_socket_file
         self.nocache = nocache
         self.component_config = {}
@@ -254,6 +201,9 @@ class BoB:
         # inapropriate. But as the code isn't probably completely ready
         # for it, we leave it at components for now.
         self.components = {}
+        # Simple list of components that died and need to wait for a
+        # restart.  Components manage their own restart schedule now.
+        self.components_to_restart = []
         self.runnable = False
         self.uid = setuid
         self.username = username
@@ -825,7 +775,11 @@ class BoB:
                     # Tell it it failed. But only if it matters (we are
                     # not shutting down and the component considers itself
                     # to be running.
-                    component.failed(exit_status);
+                    component_restarted = component.failed(exit_status)
+                    # failed() returns False if the component wants to be
+                    # restarted, but not just yet
+                    if not component_restarted:
+                        self.components_to_restart.append(component)
             else:
                 logger.info(BIND10_UNKNOWN_CHILD_PROCESS_ENDED, pid)
 
@@ -841,39 +795,22 @@ class BoB:
             timeout value.
 
         """
-        # TODO: This is an artefact of previous way of handling processes. The
-        # restart queue is currently empty at all times, so this returns None
-        # every time it is called (thought is a relict that is obviously wrong,
-        # it is called and it doesn't hurt).
-        #
-        # It is preserved for archeological reasons for the time when we return
-        # the delayed restarts, most of it might be useful then (or, if it is
-        # found useless, removed).
-        next_restart = None
-        # if we're shutting down, then don't restart
         if not self.runnable:
             return 0
-        # otherwise look through each dead process and try to restart
-        still_dead = {}
+        still_dead = []
+        # keep track of the first time we need to check this queue again,
+        # if at all
+        next_restart_time = None
         now = time.time()
-        for proc_info in self.dead_processes.values():
-            restart_time = proc_info.restart_schedule.get_restart_time(now)
-            if restart_time > now:
-                if (next_restart is None) or (next_restart > restart_time):
-                    next_restart = restart_time
-                still_dead[proc_info.pid] = proc_info
-            else:
-                logger.info(BIND10_RESURRECTING_PROCESS, proc_info.name)
-                try:
-                    proc_info.respawn()
-                    self.components[proc_info.pid] = proc_info
-                    logger.info(BIND10_RESURRECTED_PROCESS, proc_info.name, proc_info.pid)
-                except:
-                    still_dead[proc_info.pid] = proc_info
-        # remember any processes that refuse to be resurrected
-        self.dead_processes = still_dead
-        # return the time when the next process is ready to be restarted
-        return next_restart
+        for component in self.components_to_restart:
+            if not component.restart(now):
+                still_dead.append(component)
+                if next_restart_time is None or\
+                   next_restart_time > component.get_restart_time():
+                    next_restart_time = component.get_restart_time()
+        self.components_to_restart = still_dead
+
+        return next_restart_time
 
 # global variables, needed for signal handlers
 options = None
@@ -1056,10 +993,6 @@ def main():
     while boss_of_bind.runnable:
         # clean up any processes that exited
         boss_of_bind.reap_children()
-        # XXX: As we don't put anything into the processes to be restarted,
-        # this is really a complicated NOP. But we will try to reintroduce
-        # delayed restarts, so it stays here for now, until we find out if
-        # it's useful.
         next_restart = boss_of_bind.restart_processes()
         if next_restart is None:
             wait_time = None
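
The restart loop above relies on a small contract from the component objects:
failed() returns False when the component should be queued for a delayed
restart, restart(now) returns True once it actually restarted, and
get_restart_time() reports when to try next.  The hypothetical sketch below
only mirrors that contract as used here; the real implementation lives in
src/lib/python/isc/bind10/component.py and is more involved.

import time

class SketchComponent:
    """Hypothetical component illustrating the restart contract used above."""

    RESTART_DELAY = 10.0                    # assumed retry interval

    def __init__(self, name):
        self.name = name
        self._restart_time = None

    def failed(self, exit_status):
        # Return True if restarted immediately; False means "queue me in
        # components_to_restart and call restart() later".
        self._restart_time = time.time() + self.RESTART_DELAY
        return False

    def get_restart_time(self):
        return self._restart_time

    def restart(self, now):
        # Return False while it is still too early; True once restarted.
        if now < self._restart_time:
            return False
        # ... actually respawn the process here ...
        return True
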
diff --git a/src/bin/bind10/tests/bind10_test.py.in b/src/bin/bind10/tests/bind10_test.py.in
index e323113..db68b35 100644
--- a/src/bin/bind10/tests/bind10_test.py.in
+++ b/src/bin/bind10/tests/bind10_test.py.in
@@ -105,16 +105,10 @@ class TestBoB(unittest.TestCase):
         self.assertEqual(bob.cc_session, None)
         self.assertEqual(bob.ccs, None)
         self.assertEqual(bob.components, {})
-        self.assertEqual(bob.dead_processes, {})
         self.assertEqual(bob.runnable, False)
         self.assertEqual(bob.uid, None)
         self.assertEqual(bob.username, None)
         self.assertEqual(bob.nocache, False)
-        self.assertEqual(bob.cfg_start_auth, True)
-        self.assertEqual(bob.cfg_start_resolver, False)
-
-        self.assertEqual(bob.cfg_start_dhcp4, False)
-        self.assertEqual(bob.cfg_start_dhcp6, False)
 
     def test_init_alternate_socket(self):
         bob = BoB("alt_socket_file")
@@ -123,15 +117,10 @@ class TestBoB(unittest.TestCase):
         self.assertEqual(bob.cc_session, None)
         self.assertEqual(bob.ccs, None)
         self.assertEqual(bob.components, {})
-        self.assertEqual(bob.dead_processes, {})
         self.assertEqual(bob.runnable, False)
         self.assertEqual(bob.uid, None)
         self.assertEqual(bob.username, None)
         self.assertEqual(bob.nocache, False)
-        self.assertEqual(bob.cfg_start_auth, True)
-        self.assertEqual(bob.cfg_start_resolver, False)
-        self.assertEqual(bob.cfg_start_dhcp4, False)
-        self.assertEqual(bob.cfg_start_dhcp6, False)
 
     def test_command_handler(self):
         class DummySession():
diff --git a/src/bin/xfrin/tests/testdata/example.com.sqlite3 b/src/bin/xfrin/tests/testdata/example.com.sqlite3
index ed241c3..3538e3d 100644
Binary files a/src/bin/xfrin/tests/testdata/example.com.sqlite3 and b/src/bin/xfrin/tests/testdata/example.com.sqlite3 differ
diff --git a/src/bin/xfrin/tests/xfrin_test.py b/src/bin/xfrin/tests/xfrin_test.py
index 1e4d942..3c41110 100644
--- a/src/bin/xfrin/tests/xfrin_test.py
+++ b/src/bin/xfrin/tests/xfrin_test.py
@@ -14,8 +14,10 @@
 # WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 
 import unittest
+import re
 import shutil
 import socket
+import sqlite3
 import sys
 import io
 from isc.testutils.tsigctx_mock import MockTSIGContext
@@ -170,7 +172,8 @@ class MockDataSourceClient():
             return (ZoneFinder.SUCCESS, dup_soa_rrset)
         raise ValueError('Unexpected input to mock finder: bug in test case?')
 
-    def get_updater(self, zone_name, replace):
+    def get_updater(self, zone_name, replace, journaling=False):
+        self._journaling_enabled = journaling
         return self
 
     def add_rrset(self, rrset):
@@ -1132,6 +1135,7 @@ class TestAXFR(TestXfrinConnection):
     def test_do_xfrin(self):
         self.conn.response_generator = self._create_normal_response_data
         self.assertEqual(self.conn.do_xfrin(False), XFRIN_OK)
+        self.assertFalse(self.conn._datasrc_client._journaling_enabled)
 
     def test_do_xfrin_with_tsig(self):
         # use TSIG with a mock context.  we fake all verify results to
@@ -1283,6 +1287,7 @@ class TestIXFRResponse(TestXfrinConnection):
             answers=[soa_rrset, begin_soa_rrset, soa_rrset, soa_rrset])
         self.conn._handle_xfrin_responses()
         self.assertEqual(type(XfrinIXFREnd()), type(self.conn.get_xfrstate()))
+        self.assertTrue(self.conn._datasrc_client._journaling_enabled)
         self.assertEqual([], self.conn._datasrc_client.diffs)
         check_diffs(self.assertEqual,
                     [[('delete', begin_soa_rrset), ('add', soa_rrset)]],
@@ -1387,6 +1392,8 @@ class TestIXFRResponse(TestXfrinConnection):
             answers=[soa_rrset, ns_rr, a_rr, soa_rrset])
         self.conn._handle_xfrin_responses()
         self.assertEqual(type(XfrinAXFREnd()), type(self.conn.get_xfrstate()))
+        # In the case of AXFR-style IXFR, journaling must have been disabled.
+        self.assertFalse(self.conn._datasrc_client._journaling_enabled)
         self.assertEqual([], self.conn._datasrc_client.diffs)
         # The SOA should be added exactly once, and in our implementation
         # it should be added at the end of the sequence.
@@ -1540,6 +1547,19 @@ class TestXFRSessionWithSQLite3(TestXfrinConnection):
         self.assertEqual(XFRIN_OK, self.conn.do_xfrin(False, RRType.IXFR()))
         self.assertEqual(1234, self.get_zone_serial())
 
+        # Also confirm the corresponding diffs are stored in the diffs table
+        conn = sqlite3.connect(self.sqlite3db_obj)
+        cur = conn.cursor()
+        cur.execute('SELECT name, rrtype, ttl, rdata FROM diffs ORDER BY id')
+        soa_rdata_base = 'master.example.com. admin.example.com. ' + \
+            'SERIAL 3600 1800 2419200 7200'
+        self.assertEqual(cur.fetchall(),
+                         [(TEST_ZONE_NAME_STR, 'SOA', 3600,
+                           re.sub('SERIAL', str(1230), soa_rdata_base)),
+                          (TEST_ZONE_NAME_STR, 'SOA', 3600,
+                           re.sub('SERIAL', str(1234), soa_rdata_base))])
+        conn.close()
+
     def test_do_ixfrin_sqlite3_fail(self):
         '''Similar to the previous test, but xfrin fails due to error.
 
diff --git a/src/bin/xfrin/xfrin.py.in b/src/bin/xfrin/xfrin.py.in
index 911b3b3..445683e 100755
--- a/src/bin/xfrin/xfrin.py.in
+++ b/src/bin/xfrin/xfrin.py.in
@@ -367,7 +367,10 @@ class XfrinIXFRDeleteSOA(XfrinState):
                                  ' RR is given in IXFRDeleteSOA state')
         # This is the beginning state of one difference sequence (changes
         # for one SOA update).  We need to create a new Diff object now.
-        conn._diff = Diff(conn._datasrc_client, conn._zone_name)
+        # Note also that we (unconditionally) enable journaling here.  The
+        # Diff constructor may internally disable it, however, if the
+        # underlying data source doesn't support journaling.
+        conn._diff = Diff(conn._datasrc_client, conn._zone_name, False, True)
         conn._diff.delete_data(rr)
         self.set_xfrstate(conn, XfrinIXFRDelete())
         return True
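
For clarity, here is a small sketch of what the Diff construction above
amounts to.  The parameter names 'replace' and 'journaling' for the two
trailing booleans are inferred from this changeset rather than quoted from
diff.py, and the helper function name is only illustrative.

from isc.xfrin.diff import Diff

def begin_ixfr_sequence(datasrc_client, zone_name, old_soa):
    # replace=False: apply changes incrementally; journaling=True: ask the
    # data source to record them in its diffs table (the Diff class is
    # expected to fall back silently if journaling isn't supported).
    diff = Diff(datasrc_client, zone_name, False, True)
    diff.delete_data(old_soa)
    return diff
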
diff --git a/src/bin/xfrout/b10-xfrout.xml b/src/bin/xfrout/b10-xfrout.xml
index 9889b80..4f6a7fa 100644
--- a/src/bin/xfrout/b10-xfrout.xml
+++ b/src/bin/xfrout/b10-xfrout.xml
@@ -98,6 +98,31 @@
       that can run concurrently. The default is 10.
     </para>
     <para>
+      <varname>tsig_key_ring</varname>
+      A list of TSIG keys (each of which is in the form of
+      name:base64-key[:algorithm]) used for access control on transfer
+      requests.
+      The default is an empty list.
+    </para>
+    <para>
+      <varname>transfer_acl</varname>
+      A list of ACL elements that apply to all transfer requests by
+      default (unless overridden in zone_config).  See the BIND 10
+      guide for configuration examples.
+      The default is an element that allows any transfer request.
+    </para>
+    <para>
+      <varname>zone_config</varname>
+      A list of JSON objects (i.e. maps) that define per zone
+      A list of JSON objects (i.e. maps) that define per-zone
+      configuration concerning <command>b10-xfrout</command>.
+      The supported names of each object are "origin" (the origin
+      name of the zone), "class" (the RR class of the zone, optional,
+      defaulting to "IN"), and "acl_element" (an ACL applicable only to
+      transfer requests for that zone).
+      See the BIND 10 guide for configuration examples.
+      The default is an empty list, that is, no zone-specific configuration.
+    <para>
       <varname>log_name</varname>
 <!-- TODO -->
     </para>
diff --git a/src/bin/xfrout/tests/Makefile.am b/src/bin/xfrout/tests/Makefile.am
index 509df79..ad6d7e6 100644
--- a/src/bin/xfrout/tests/Makefile.am
+++ b/src/bin/xfrout/tests/Makefile.am
@@ -3,8 +3,8 @@ PYTESTS = xfrout_test.py
 noinst_SCRIPTS = $(PYTESTS)
 
 EXTRA_DIST = testdata/test.sqlite3
-# This one is actually not necessary, but added for reference
-EXTRA_DIST += testdata/example.com
+# These are actually not necessary, but added for reference
+EXTRA_DIST += testdata/example.com testdata/creatediff.py
 
 # If necessary (rare cases), explicitly specify paths to dynamic libraries
 # required by loadable python modules.
diff --git a/src/bin/xfrout/tests/testdata/creatediff.py b/src/bin/xfrout/tests/testdata/creatediff.py
new file mode 100755
index 0000000..dab6622
--- /dev/null
+++ b/src/bin/xfrout/tests/testdata/creatediff.py
@@ -0,0 +1,58 @@
+#!/usr/bin/env python3.1
+
+# Copyright (C) 2011  Internet Systems Consortium.
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+'''This script was used to create zone differences for IXFR tests.
+
+The result was stored in the test SQLite3 database file, so this script
+itself isn't necessary for testing.  It's provided here for reference
+purposes.
+
+'''
+
+import isc.datasrc
+import isc.log
+from isc.dns import *
+from isc.testutils.rrset_utils import *
+
+isc.log.init("dummy")           # XXX
+
+ZONE_NAME = Name('example.com')
+NS_NAME_STR = 'a.dns.example.com'
+NS_NAME = Name(NS_NAME_STR)
+
+client = isc.datasrc.DataSourceClient('sqlite3',
+                                      '{ "database_file": "test.sqlite3" }')
+
+# Install the initial data
+updater = client.get_updater(ZONE_NAME, True)
+updater.add_rrset(create_soa(2011111802))
+updater.add_rrset(create_ns(NS_NAME_STR))
+updater.add_rrset(create_a(NS_NAME, '192.0.2.53'))
+updater.add_rrset(create_aaaa(NS_NAME, '2001:db8::1'))
+updater.commit()
+
+# Incremental update to generate diffs
+updater = client.get_updater(ZONE_NAME, False, True)
+updater.delete_rrset(create_soa(2011111802))
+updater.add_rrset(create_soa(2011111900))
+updater.add_rrset(create_a(NS_NAME, '192.0.2.2', 7200))
+updater.delete_rrset(create_soa(2011111900))
+updater.delete_rrset(create_a(NS_NAME, '192.0.2.53'))
+updater.delete_rrset(create_aaaa(NS_NAME, '2001:db8::1'))
+updater.add_rrset(create_soa(2011112001))
+updater.add_rrset(create_a(NS_NAME, '192.0.2.1'))
+updater.commit()
diff --git a/src/bin/xfrout/tests/testdata/example.com b/src/bin/xfrout/tests/testdata/example.com
index 25c5e6a..8458d09 100644
--- a/src/bin/xfrout/tests/testdata/example.com
+++ b/src/bin/xfrout/tests/testdata/example.com
@@ -1,6 +1,6 @@
 ;; This is the source of a zone stored in test.sqlite3.  It's provided
 ;; for reference purposes only.
-example.com.         3600  IN  SOA a.dns.example.com. mail.example.com. 1 1 1 1 1
+example.com.         3600  IN  SOA master.example.com. admin.example.com. 2011112001 3600 1800 2419200 7200
 example.com.         3600  IN  NS  a.dns.example.com.
 a.dns.example.com.   3600  IN  A    192.0.2.1
 a.dns.example.com.   7200  IN  A    192.0.2.2
diff --git a/src/bin/xfrout/tests/testdata/test.sqlite3 b/src/bin/xfrout/tests/testdata/test.sqlite3
index af491f5..9eb14f1 100644
Binary files a/src/bin/xfrout/tests/testdata/test.sqlite3 and b/src/bin/xfrout/tests/testdata/test.sqlite3 differ
diff --git a/src/bin/xfrout/tests/xfrout_test.py.in b/src/bin/xfrout/tests/xfrout_test.py.in
index a40359a..37e8993 100644
--- a/src/bin/xfrout/tests/xfrout_test.py.in
+++ b/src/bin/xfrout/tests/xfrout_test.py.in
@@ -22,6 +22,7 @@ from isc.testutils.tsigctx_mock import MockTSIGContext
 from isc.cc.session import *
 import isc.config
 from isc.dns import *
+from isc.testutils.rrset_utils import *
 from xfrout import *
 import xfrout
 import isc.log
@@ -30,6 +31,16 @@ import isc.acl.dns
 TESTDATA_SRCDIR = os.getenv("TESTDATASRCDIR")
 TSIG_KEY = TSIGKey("example.com:SFuWd/q99SzF8Yzd1QbB9g==")
 
+#
+# Commonly used (mostly constant) test parameters
+#
+TEST_ZONE_NAME_STR = "example.com."
+TEST_ZONE_NAME = Name(TEST_ZONE_NAME_STR)
+TEST_RRCLASS = RRClass.IN()
+IXFR_OK_VERSION = 2011111802
+IXFR_NG_VERSION = 2011112800
+SOA_CURRENT_VERSION = 2011112001
+
 # our fake socket, where we can read and insert messages
 class MySocket():
     def __init__(self, family, type):
@@ -69,6 +80,38 @@ class MockDataSrcClient:
     def __init__(self, type, config):
         pass
 
+    def find_zone(self, zone_name):
+        '''Mock version of find_zone().
+
+        It returns itself (subsequently acting as a mock ZoneFinder) for
+        some test zone names.  For a special name it returns NOTFOUND to
+        emulate the condition where the specified zone doesn't exist.
+
+        '''
+        self._zone_name = zone_name
+        if zone_name == Name('notauth.example.com'):
+            return (isc.datasrc.DataSourceClient.NOTFOUND, None)
+        return (isc.datasrc.DataSourceClient.SUCCESS, self)
+
+    def find(self, name, rrtype, target, options):
+        '''Mock ZoneFinder.find().
+
+        (At the moment) this method only handles queries for type SOA.
+        By default it returns a normal SOA RR(set) whose owner name is
+        the query name.  It also emulates some unusual cases for special
+        zone names.
+
+        '''
+        if name == Name('nosoa.example.com') and rrtype == RRType.SOA():
+            return (ZoneFinder.NXDOMAIN, None)
+        elif name == Name('multisoa.example.com') and rrtype == RRType.SOA():
+            soa_rrset = create_soa(SOA_CURRENT_VERSION)
+            soa_rrset.add_rdata(soa_rrset.get_rdata()[0])
+            return (ZoneFinder.SUCCESS, soa_rrset)
+        elif rrtype == RRType.SOA():
+            return (ZoneFinder.SUCCESS, create_soa(SOA_CURRENT_VERSION))
+        raise ValueError('Unexpected input to mock finder: bug in test case?')
+
     def get_iterator(self, zone_name, adjust_ttl=False):
         if zone_name == Name('notauth.example.com'):
             raise isc.datasrc.Error('no such zone')
@@ -78,19 +121,20 @@ class MockDataSrcClient:
     def get_soa(self):  # emulate ZoneIterator.get_soa()
         if self._zone_name == Name('nosoa.example.com'):
             return None
-        soa_rrset = RRset(self._zone_name, RRClass.IN(), RRType.SOA(),
-                          RRTTL(3600))
-        soa_rrset.add_rdata(Rdata(RRType.SOA(), RRClass.IN(),
-                                  'master.example.com. ' +
-                                  'admin.example.com. 1234 ' +
-                                  '3600 1800 2419200 7200'))
+        soa_rrset = create_soa(SOA_CURRENT_VERSION)
         if self._zone_name == Name('multisoa.example.com'):
-            soa_rrset.add_rdata(Rdata(RRType.SOA(), RRClass.IN(),
-                                      'master.example.com. ' +
-                                      'admin.example.com. 1300 ' +
-                                      '3600 1800 2419200 7200'))
+            soa_rrset.add_rdata(soa_rrset.get_rdata()[0])
         return soa_rrset
 
+    def get_journal_reader(self, zone_name, begin_serial, end_serial):
+        if zone_name == Name('notauth2.example.com'):
+            return isc.datasrc.ZoneJournalReader.NO_SUCH_ZONE, None
+        if zone_name == Name('nojournal.example.com'):
+            raise isc.datasrc.NotImplemented('journaling not supported')
+        if begin_serial == IXFR_NG_VERSION:
+            return isc.datasrc.ZoneJournalReader.NO_SUCH_VERSION, None
+        return isc.datasrc.ZoneJournalReader.SUCCESS, self
+
 class MyCCSession(isc.config.ConfigData):
     def __init__(self):
         module_spec = isc.config.module_spec_from_file(
@@ -160,26 +204,43 @@ class TestXfroutSessionBase(unittest.TestCase):
         return msg.get_tsig_record() is not None
 
     def create_request_data(self, with_question=True, with_tsig=False,
-                            ixfr=False, with_soa=False):
+                            ixfr=None, qtype=None, zone_name=TEST_ZONE_NAME,
+                            soa_class=TEST_RRCLASS, num_soa=1):
+        '''Create commonly used XFR request data.
+
+        By default the request type is AXFR; if 'ixfr' is an integer,
+        the request type will be IXFR and an SOA with the serial being
+        the value of the parameter will be included in the authority
+        section.
+
+        This method has various minor parameters only for creating
+        badly formatted requests for testing purposes:
+        qtype: the RR type of the question section.  By default automatically
+               determined by the value of ixfr, but could be an invalid type
+               for testing.
+        zone_name: the query (zone) name.  For IXFR, it's also used as
+                   the owner name of the SOA in the authority section.
+        soa_class: IXFR only.  The RR class of the SOA RR in the authority
+                   section.
+        num_soa: IXFR only.  The number of SOA RDATAs in the authority
+                 section.
+        '''
         msg = Message(Message.RENDER)
         query_id = 0x1035
         msg.set_qid(query_id)
         msg.set_opcode(Opcode.QUERY())
         msg.set_rcode(Rcode.NOERROR())
+        req_type = RRType.AXFR() if ixfr is None else RRType.IXFR()
         if with_question:
-            if ixfr:
-                msg.add_question(Question(Name("example.com"), RRClass.IN(),
-                                          RRType.IXFR()))
-            else:
-                msg.add_question(Question(Name("example.com"), RRClass.IN(),
-                                          RRType.AXFR()))
-
-        if with_soa:
-            soa_rrset = RRset(Name('example.com'), RRClass.IN(), RRType.SOA(),
-                              RRTTL(0))
-            soa_rrset.add_rdata(Rdata(RRType.SOA(), RRClass.IN(),
-                                '. . 1 0 0 0 0'))
-            msg.add_rrset(Message.SECTION_AUTHORITY, soa_rrset)
+            msg.add_question(Question(zone_name, RRClass.IN(),
+                                      req_type if qtype is None else qtype))
+        if req_type == RRType.IXFR():
+            soa = RRset(zone_name, soa_class, RRType.SOA(), RRTTL(0))
+            # In the RDATA only the serial matters.
+            for i in range(0, num_soa):
+                soa.add_rdata(Rdata(RRType.SOA(), soa_class,
+                                    'm r ' + str(ixfr) + ' 1 1 1 1'))
+            msg.add_rrset(Message.SECTION_AUTHORITY, soa)
 
         renderer = MessageRenderer()
         if with_tsig:
@@ -190,6 +251,13 @@ class TestXfroutSessionBase(unittest.TestCase):
         request_data = renderer.get_data()
         return request_data
 
+    def set_request_type(self, type):
+        self.xfrsess._request_type = type
+        if type == RRType.AXFR():
+            self.xfrsess._request_typestr = 'AXFR'
+        else:
+            self.xfrsess._request_typestr = 'IXFR'
+
     def setUp(self):
         self.sock = MySocket(socket.AF_INET,socket.SOCK_STREAM)
         self.xfrsess = MyXfroutSession(self.sock, None, Dbserver(),
@@ -200,13 +268,9 @@ class TestXfroutSessionBase(unittest.TestCase):
                                        isc.acl.dns.REQUEST_LOADER.load(
                                            [{"action": "ACCEPT"}]),
                                        {})
+        self.set_request_type(RRType.AXFR()) # test AXFR by default
         self.mdata = self.create_request_data()
-        self.soa_rrset = RRset(Name('example.com'), RRClass.IN(), RRType.SOA(),
-                               RRTTL(3600))
-        self.soa_rrset.add_rdata(Rdata(RRType.SOA(), RRClass.IN(),
-                                       'master.Example.com. ' +
-                                       'admin.exAmple.com. ' +
-                                       '1234 3600 1800 2419200 7200'))
+        self.soa_rrset = create_soa(SOA_CURRENT_VERSION)
         # some test replaces a module-wide function.  We should ensure the
         # original is used elsewhere.
         self.orig_get_rrset_len = xfrout.get_rrset_len
@@ -234,7 +298,7 @@ class TestXfroutSession(TestXfroutSessionBase):
         # set up a bogus request, which should result in FORMERR. (it only
         # has to be something that is different from the previous case)
         self.xfrsess._request_data = \
-            self.create_request_data(with_question=False)
+            self.create_request_data(ixfr=IXFR_OK_VERSION, num_soa=2)
         # Replace the data source client to avoid datasrc related exceptions
         self.xfrsess.ClientClass = MockDataSrcClient
         XfroutSession._handle(self.xfrsess)
@@ -253,21 +317,27 @@ class TestXfroutSession(TestXfroutSessionBase):
         XfroutSession._handle(self.xfrsess)
 
     def test_parse_query_message(self):
+        # Valid AXFR
         [get_rcode, get_msg] = self.xfrsess._parse_query_message(self.mdata)
+        self.assertEqual(RRType.AXFR(), self.xfrsess._request_type)
         self.assertEqual(get_rcode.to_text(), "NOERROR")
 
-        # Broken request: no question
-        request_data = self.create_request_data(with_question=False)
+        # Valid IXFR
+        request_data = self.create_request_data(ixfr=2011111801)
         rcode, msg = self.xfrsess._parse_query_message(request_data)
-        self.assertEqual(Rcode.FORMERR(), rcode)
+        self.assertEqual(RRType.IXFR(), self.xfrsess._request_type)
+        self.assertEqual(Rcode.NOERROR(), rcode)
 
-        # Broken request: IXFR without SOA
-        request_data = self.create_request_data(ixfr=True)
-        rcode, msg = self.xfrsess._parse_query_message(request_data)
-        self.assertEqual(Rcode.FORMERR(), rcode)
+        # Broken request: no question
+        self.assertRaises(RuntimeError, self.xfrsess._parse_query_message,
+                          self.create_request_data(with_question=False))
+
+        # Broken request: invalid RR type (neither AXFR nor IXFR)
+        self.assertRaises(RuntimeError, self.xfrsess._parse_query_message,
+                          self.create_request_data(qtype=RRType.A()))
 
         # NOERROR
-        request_data = self.create_request_data(ixfr=True, with_soa=True)
+        request_data = self.create_request_data(ixfr=IXFR_OK_VERSION)
         rcode, msg = self.xfrsess._parse_query_message(request_data)
         self.assertEqual(rcode.to_text(), "NOERROR")
 
@@ -458,7 +528,7 @@ class TestXfroutSession(TestXfroutSessionBase):
                                RRTTL(3600))
         soa_rrset.add_rdata(Rdata(RRType.SOA(), RRClass.IN(),
                                   'master.Example.com. admin.exAmple.com. ' +
-                                  '1234 3600 1800 2419200 7200'))
+                                  '2011112001 3600 1800 2419200 7200'))
         msg.add_rrset(Message.SECTION_ANSWER, soa_rrset)
         self.xfrsess._send_message(self.sock, msg)
         send_out_data = self.sock.readsent()[2:]
@@ -492,34 +562,28 @@ class TestXfroutSession(TestXfroutSessionBase):
         msg = self.getmsg()
         msg.make_response()
 
-        # packet number less than TSIG_SIGN_EVERY_NTH
-        packet_neet_not_sign = xfrout.TSIG_SIGN_EVERY_NTH - 1
         self.xfrsess._send_message_with_last_soa(msg, self.sock,
-                                                 self.soa_rrset, 0,
-                                                 packet_neet_not_sign)
+                                                 self.soa_rrset, 0)
         get_msg = self.sock.read_msg()
-        # tsig context is not exist
+        # tsig context does not exist
         self.assertFalse(self.message_has_tsig(get_msg))
 
         self.assertEqual(get_msg.get_rr_count(Message.SECTION_QUESTION), 1)
         self.assertEqual(get_msg.get_rr_count(Message.SECTION_ANSWER), 1)
         self.assertEqual(get_msg.get_rr_count(Message.SECTION_AUTHORITY), 0)
 
-        #answer_rrset_iter = section_iter(get_msg, section.ANSWER())
-        answer = get_msg.get_section(Message.SECTION_ANSWER)[0]#answer_rrset_iter.get_rrset()
+        answer = get_msg.get_section(Message.SECTION_ANSWER)[0]
         self.assertEqual(answer.get_name().to_text(), "example.com.")
         self.assertEqual(answer.get_class(), RRClass("IN"))
         self.assertEqual(answer.get_type().to_text(), "SOA")
         rdata = answer.get_rdata()
         self.assertEqual(rdata[0], self.soa_rrset.get_rdata()[0])
 
-        # msg is the TSIG_SIGN_EVERY_NTH one
-        # sending the message with last soa together
+        # Sending the message with last soa together
         self.xfrsess._send_message_with_last_soa(msg, self.sock,
-                                                 self.soa_rrset, 0,
-                                                 TSIG_SIGN_EVERY_NTH)
+                                                 self.soa_rrset, 0)
         get_msg = self.sock.read_msg()
-        # tsig context is not exist
+        # tsig context does not exist
         self.assertFalse(self.message_has_tsig(get_msg))
 
     def test_send_message_with_last_soa_with_tsig(self):
@@ -529,13 +593,9 @@ class TestXfroutSession(TestXfroutSessionBase):
         msg = self.getmsg()
         msg.make_response()
 
-        # packet number less than TSIG_SIGN_EVERY_NTH
-        packet_neet_not_sign = xfrout.TSIG_SIGN_EVERY_NTH - 1
-        # msg is not the TSIG_SIGN_EVERY_NTH one
-        # sending the message with last soa together
+        # Sending the message with last soa together
         self.xfrsess._send_message_with_last_soa(msg, self.sock,
-                                                 self.soa_rrset, 0,
-                                                 packet_neet_not_sign)
+                                                 self.soa_rrset, 0)
         get_msg = self.sock.read_msg()
         self.assertTrue(self.message_has_tsig(get_msg))
 
@@ -543,14 +603,6 @@ class TestXfroutSession(TestXfroutSessionBase):
         self.assertEqual(get_msg.get_rr_count(Message.SECTION_ANSWER), 1)
         self.assertEqual(get_msg.get_rr_count(Message.SECTION_AUTHORITY), 0)
 
-        # msg is the TSIG_SIGN_EVERY_NTH one
-        # sending the message with last soa together
-        self.xfrsess._send_message_with_last_soa(msg, self.sock,
-                                                 self.soa_rrset, 0,
-                                                 TSIG_SIGN_EVERY_NTH)
-        get_msg = self.sock.read_msg()
-        self.assertTrue(self.message_has_tsig(get_msg))
-
     def test_trigger_send_message_with_last_soa(self):
         rrset_a = RRset(Name("example.com"), RRClass.IN(), RRType.A(), RRTTL(3600))
         rrset_a.add_rdata(Rdata(RRType.A(), RRClass.IN(), "192.0.2.1"))
@@ -562,8 +614,6 @@ class TestXfroutSession(TestXfroutSessionBase):
         # length larger than MAX-len(rrset)
         length_need_split = xfrout.XFROUT_MAX_MESSAGE_SIZE - \
             get_rrset_len(self.soa_rrset) + 1
-        # packet number less than TSIG_SIGN_EVERY_NTH
-        packet_neet_not_sign = xfrout.TSIG_SIGN_EVERY_NTH - 1
 
         # give the function a value that is larger than MAX-len(rrset)
         # this should have triggered the sending of two messages
@@ -571,8 +621,7 @@ class TestXfroutSession(TestXfroutSessionBase):
         # the sending in _with_last_soa)
         self.xfrsess._send_message_with_last_soa(msg, self.sock,
                                                  self.soa_rrset,
-                                                 length_need_split,
-                                                 packet_neet_not_sign)
+                                                 length_need_split)
         get_msg = self.sock.read_msg()
         self.assertFalse(self.message_has_tsig(get_msg))
         self.assertEqual(get_msg.get_rr_count(Message.SECTION_QUESTION), 1)
@@ -592,7 +641,6 @@ class TestXfroutSession(TestXfroutSessionBase):
         self.assertEqual(get_msg.get_rr_count(Message.SECTION_ANSWER), 1)
         self.assertEqual(get_msg.get_rr_count(Message.SECTION_AUTHORITY), 0)
 
-        #answer_rrset_iter = section_iter(get_msg, Message.SECTION_ANSWER)
         answer = get_msg.get_section(Message.SECTION_ANSWER)[0]
         self.assertEqual(answer.get_name().to_text(), "example.com.")
         self.assertEqual(answer.get_class(), RRClass("IN"))
@@ -612,8 +660,6 @@ class TestXfroutSession(TestXfroutSessionBase):
         # length larger than MAX-len(rrset)
         length_need_split = xfrout.XFROUT_MAX_MESSAGE_SIZE - \
             get_rrset_len(self.soa_rrset) + 1
-        # packet number less than TSIG_SIGN_EVERY_NTH
-        packet_neet_not_sign = xfrout.TSIG_SIGN_EVERY_NTH - 1
 
         # give the function a value that is larger than MAX-len(rrset)
         # this should have triggered the sending of two messages
@@ -621,26 +667,10 @@ class TestXfroutSession(TestXfroutSessionBase):
         # the sending in _with_last_soa)
         self.xfrsess._send_message_with_last_soa(msg, self.sock,
                                                  self.soa_rrset,
-                                                 length_need_split,
-                                                 packet_neet_not_sign)
-        get_msg = self.sock.read_msg()
-        # msg is not the TSIG_SIGN_EVERY_NTH one, it shouldn't be tsig signed
-        self.assertFalse(self.message_has_tsig(get_msg))
-        # the last packet should be tsig signed
-        get_msg = self.sock.read_msg()
-        self.assertTrue(self.message_has_tsig(get_msg))
-        # and it should not have sent anything else
-        self.assertEqual(0, len(self.sock.sendqueue))
-
-
-        # msg is the TSIG_SIGN_EVERY_NTH one, it should be tsig signed
-        self.xfrsess._send_message_with_last_soa(msg, self.sock,
-                                                 self.soa_rrset,
-                                                 length_need_split,
-                                                 xfrout.TSIG_SIGN_EVERY_NTH)
+                                                 length_need_split)
+        # Both messages should have TSIG RRs
         get_msg = self.sock.read_msg()
         self.assertTrue(self.message_has_tsig(get_msg))
-        # the last packet should be tsig signed
         get_msg = self.sock.read_msg()
         self.assertTrue(self.message_has_tsig(get_msg))
         # and it should not have sent anything else
@@ -649,16 +679,101 @@ class TestXfroutSession(TestXfroutSessionBase):
     def test_get_rrset_len(self):
         self.assertEqual(82, get_rrset_len(self.soa_rrset))
 
-    def test_check_xfrout_available(self):
+    def test_xfrout_axfr_setup(self):
         self.xfrsess.ClientClass = MockDataSrcClient
-        self.assertEqual(self.xfrsess._check_xfrout_available(
-                Name('example.com')), Rcode.NOERROR())
-        self.assertEqual(self.xfrsess._check_xfrout_available(
-                Name('notauth.example.com')), Rcode.NOTAUTH())
-        self.assertEqual(self.xfrsess._check_xfrout_available(
-                Name('nosoa.example.com')), Rcode.SERVFAIL())
-        self.assertEqual(self.xfrsess._check_xfrout_available(
-                Name('multisoa.example.com')), Rcode.SERVFAIL())
+        # Successful case.  A zone iterator should be set up.
+        self.assertEqual(self.xfrsess._xfrout_setup(
+                self.getmsg(), TEST_ZONE_NAME, TEST_RRCLASS), Rcode.NOERROR())
+        self.assertNotEqual(None, self.xfrsess._iterator)
+
+        # Failure cases
+        self.assertEqual(self.xfrsess._xfrout_setup(
+                self.getmsg(), Name('notauth.example.com'), TEST_RRCLASS),
+                         Rcode.NOTAUTH())
+        self.assertEqual(self.xfrsess._xfrout_setup(
+                self.getmsg(), Name('nosoa.example.com'), TEST_RRCLASS),
+                         Rcode.SERVFAIL())
+        self.assertEqual(self.xfrsess._xfrout_setup(
+                self.getmsg(), Name('multisoa.example.com'), TEST_RRCLASS),
+                         Rcode.SERVFAIL())
+
+    def test_xfrout_ixfr_setup(self):
+        self.xfrsess.ClientClass = MockDataSrcClient
+        self.set_request_type(RRType.IXFR())
+
+        # Successful case of pure IXFR.  A zone journal reader should be set
+        # up.
+        self.mdata = self.create_request_data(ixfr=IXFR_OK_VERSION)
+        self.assertEqual(self.xfrsess._xfrout_setup(
+                self.getmsg(), TEST_ZONE_NAME, TEST_RRCLASS), Rcode.NOERROR())
+        self.assertNotEqual(None, self.xfrsess._jnl_reader)
+
+        # Successful case, but as a result of falling back to AXFR-style
+        # IXFR.  A zone iterator should be set up instead of a journal reader.
+        self.mdata = self.create_request_data(ixfr=IXFR_NG_VERSION)
+        self.assertEqual(self.xfrsess._xfrout_setup(
+                self.getmsg(), TEST_ZONE_NAME, TEST_RRCLASS), Rcode.NOERROR())
+        self.assertNotEqual(None, self.xfrsess._iterator)
+        self.assertEqual(None, self.xfrsess._jnl_reader)
+
+        # Successful case, but the requested SOA serial is equal to that of
+        # the local SOA.  Both iterator and jnl_reader should be None,
+        # indicating that the response will contain just one SOA.
+        self.mdata = self.create_request_data(ixfr=SOA_CURRENT_VERSION)
+        self.assertEqual(self.xfrsess._xfrout_setup(
+                self.getmsg(), TEST_ZONE_NAME, TEST_RRCLASS), Rcode.NOERROR())
+        self.assertEqual(None, self.xfrsess._iterator)
+        self.assertEqual(None, self.xfrsess._jnl_reader)
+
+        # The data source doesn't support journaling.  Should fallback to AXFR.
+        zone_name = Name('nojournal.example.com')
+        self.mdata = self.create_request_data(ixfr=IXFR_OK_VERSION,
+                                              zone_name=zone_name)
+        self.assertEqual(self.xfrsess._xfrout_setup(
+                self.getmsg(), zone_name, TEST_RRCLASS), Rcode.NOERROR())
+        self.assertNotEqual(None, self.xfrsess._iterator)
+
+        # Failure cases
+        zone_name = Name('notauth.example.com')
+        self.mdata = self.create_request_data(ixfr=IXFR_OK_VERSION,
+                                              zone_name=zone_name)
+        self.assertEqual(self.xfrsess._xfrout_setup(
+                self.getmsg(), zone_name, TEST_RRCLASS), Rcode.NOTAUTH())
+        # this is a strange case: the zone's SOA will be found but the journal
+        # reader won't be created due to 'no such zone'.
+        zone_name = Name('notauth2.example.com')
+        self.mdata = self.create_request_data(ixfr=IXFR_OK_VERSION,
+                                              zone_name=zone_name)
+        self.assertEqual(self.xfrsess._xfrout_setup(
+                self.getmsg(), zone_name, TEST_RRCLASS), Rcode.NOTAUTH())
+        zone_name = Name('nosoa.example.com')
+        self.mdata = self.create_request_data(ixfr=IXFR_OK_VERSION,
+                                              zone_name=zone_name)
+        self.assertEqual(self.xfrsess._xfrout_setup(
+                self.getmsg(), zone_name, TEST_RRCLASS), Rcode.SERVFAIL())
+        zone_name = Name('multisoa.example.com')
+        self.mdata = self.create_request_data(ixfr=IXFR_OK_VERSION,
+                                              zone_name=zone_name)
+        self.assertEqual(self.xfrsess._xfrout_setup(
+                self.getmsg(), zone_name, TEST_RRCLASS), Rcode.SERVFAIL())
+
+        # query name doesn't match the SOA's owner
+        self.mdata = self.create_request_data(ixfr=IXFR_OK_VERSION)
+        self.assertEqual(self.xfrsess._xfrout_setup(
+                self.getmsg(), zone_name, TEST_RRCLASS), Rcode.FORMERR())
+
+        # query's RR class doesn't match the SOA's class
+        zone_name = TEST_ZONE_NAME # make sure the name matches this time
+        self.mdata = self.create_request_data(ixfr=IXFR_OK_VERSION,
+                                              soa_class=RRClass.CH())
+        self.assertEqual(self.xfrsess._xfrout_setup(
+                self.getmsg(), zone_name, TEST_RRCLASS), Rcode.FORMERR())
+
+        # multiple SOA RRs
+        self.mdata = self.create_request_data(ixfr=IXFR_OK_VERSION,
+                                              num_soa=2)
+        self.assertEqual(self.xfrsess._xfrout_setup(
+                self.getmsg(), zone_name, TEST_RRCLASS), Rcode.FORMERR())
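
Taken together, the assertions above pin down how _xfrout_setup() maps each
scenario to an RCODE.  A plain-Python summary of just those outcomes
(illustrative strings, not isc.dns Rcode objects, and not part of the patch):

    # Illustrative summary of the assertions in the two setup tests above.
    XFROUT_SETUP_RCODES = {
        'zone found, iterator/journal reader set up':        'NOERROR',
        'zone not in the data source':                       'NOTAUTH',
        'SOA found but journal reader reports no such zone': 'NOTAUTH',
        'zone has no SOA':                                   'SERVFAIL',
        'zone has multiple SOA RRs':                         'SERVFAIL',
        'IXFR query SOA name or class mismatch':             'FORMERR',
        'IXFR authority section has multiple SOA RRs':       'FORMERR',
    }
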
 
     def test_dns_xfrout_start_formerror(self):
         # formerror
@@ -666,13 +781,10 @@ class TestXfroutSession(TestXfroutSessionBase):
         sent_data = self.sock.readsent()
         self.assertEqual(len(sent_data), 0)
 
-    def default(self, param):
-        return "example.com"
-
     def test_dns_xfrout_start_notauth(self):
-        def notauth(formpara):
+        def notauth(msg, name, rrclass):
             return Rcode.NOTAUTH()
-        self.xfrsess._check_xfrout_available = notauth
+        self.xfrsess._xfrout_setup = notauth
         self.xfrsess.dns_xfrout_start(self.sock, self.mdata)
         get_msg = self.sock.read_msg()
         self.assertEqual(get_msg.get_rcode().to_text(), "NOTAUTH")
@@ -685,9 +797,9 @@ class TestXfroutSession(TestXfroutSessionBase):
         self.assertEqual(self.sock.read_msg().get_rcode(), Rcode.SERVFAIL())
 
     def test_dns_xfrout_start_noerror(self):
-        def noerror(form):
+        def noerror(msg, name, rrclass):
             return Rcode.NOERROR()
-        self.xfrsess._check_xfrout_available = noerror
+        self.xfrsess._xfrout_setup = noerror
 
         def myreply(msg, sock):
             self.sock.send(b"success")
@@ -696,14 +808,14 @@ class TestXfroutSession(TestXfroutSessionBase):
         self.xfrsess.dns_xfrout_start(self.sock, self.mdata)
         self.assertEqual(self.sock.readsent(), b"success")
 
-    def test_reply_xfrout_query_noerror(self):
+    def test_reply_xfrout_query_axfr(self):
         self.xfrsess._soa = self.soa_rrset
         self.xfrsess._iterator = [self.soa_rrset]
         self.xfrsess._reply_xfrout_query(self.getmsg(), self.sock)
         reply_msg = self.sock.read_msg()
         self.assertEqual(reply_msg.get_rr_count(Message.SECTION_ANSWER), 2)
 
-    def test_reply_xfrout_query_noerror_with_tsig(self):
+    def test_reply_xfrout_query_axfr_with_tsig(self):
         rrset = RRset(Name('a.example.com'), RRClass.IN(), RRType.A(),
                       RRTTL(3600))
         rrset.add_rdata(Rdata(RRType.A(), RRClass.IN(), '192.0.2.1'))
@@ -719,28 +831,51 @@ class TestXfroutSession(TestXfroutSessionBase):
         self.xfrsess._tsig_ctx = self.create_mock_tsig_ctx(TSIGError.NOERROR)
         self.xfrsess._reply_xfrout_query(self.getmsg(), self.sock)
 
-        # tsig signed first package
-        reply_msg = self.sock.read_msg()
-        self.assertEqual(reply_msg.get_rr_count(Message.SECTION_ANSWER), 1)
-        self.assertTrue(self.message_has_tsig(reply_msg))
-        # (TSIG_SIGN_EVERY_NTH - 1) packets have no tsig
-        for i in range(0, xfrout.TSIG_SIGN_EVERY_NTH - 1):
-            reply_msg = self.sock.read_msg()
-            self.assertFalse(self.message_has_tsig(reply_msg))
-        # TSIG_SIGN_EVERY_NTH packet has tsig
-        reply_msg = self.sock.read_msg()
-        self.assertTrue(self.message_has_tsig(reply_msg))
-
-        for i in range(0, 100 - TSIG_SIGN_EVERY_NTH):
+        # All messages must have TSIG as we don't support the feature of
+        # skipping intermediate TSIG records (with bulk signing).
+        for i in range(0, 102): # 102 = all 100 RRs from iterator and 2 SOAs
             reply_msg = self.sock.read_msg()
-            self.assertFalse(self.message_has_tsig(reply_msg))
-        # tsig signed last package
-        reply_msg = self.sock.read_msg()
-        self.assertTrue(self.message_has_tsig(reply_msg))
+            # With the hack of get_rrset_len() above, every message must have
+            # exactly one RR in the answer section.
+            self.assertEqual(reply_msg.get_rr_count(Message.SECTION_ANSWER), 1)
+            self.assertTrue(self.message_has_tsig(reply_msg))
 
         # and it should not have sent anything else
         self.assertEqual(0, len(self.sock.sendqueue))
 
+    def test_reply_xfrout_query_ixfr(self):
+        # Creating a pure (incremental) IXFR response.  Intermediate SOA
+        # RRs won't be skipped.
+        self.xfrsess._soa = create_soa(SOA_CURRENT_VERSION)
+        self.xfrsess._iterator = [create_soa(IXFR_OK_VERSION),
+                                  create_a(Name('a.example.com'), '192.0.2.2'),
+                                  create_soa(SOA_CURRENT_VERSION),
+                                  create_aaaa(Name('a.example.com'),
+                                              '2001:db8::1')]
+        self.xfrsess._jnl_reader = self.xfrsess._iterator
+        self.xfrsess._reply_xfrout_query(self.getmsg(), self.sock)
+        reply_msg = self.sock.read_msg(Message.PRESERVE_ORDER)
+        actual_records = reply_msg.get_section(Message.SECTION_ANSWER)
+
+        expected_records = self.xfrsess._iterator[:]
+        expected_records.insert(0, create_soa(SOA_CURRENT_VERSION))
+        expected_records.append(create_soa(SOA_CURRENT_VERSION))
+
+        self.assertEqual(len(expected_records), len(actual_records))
+        for (expected_rr, actual_rr) in zip(expected_records, actual_records):
+            self.assertTrue(expected_rr, actual_rr)
+
+    def test_reply_xfrout_query_ixfr_soa_only(self):
+        # Creating an IXFR response that contains only one RR, which is the
+        # SOA of the current version.
+        self.xfrsess._soa = create_soa(SOA_CURRENT_VERSION)
+        self.xfrsess._iterator = None
+        self.xfrsess._jnl_reader = None
+        self.xfrsess._reply_xfrout_query(self.getmsg(), self.sock)
+        reply_msg = self.sock.read_msg(Message.PRESERVE_ORDER)
+        answer = reply_msg.get_section(Message.SECTION_ANSWER)
+        self.assertEqual(1, len(answer))
+        self.assertTrue(create_soa(SOA_CURRENT_VERSION), answer[0])
 
 class TestXfroutSessionWithSQLite3(TestXfroutSessionBase):
     '''Tests for XFR-out sessions using an SQLite3 DB.
@@ -756,19 +891,72 @@ class TestXfroutSessionWithSQLite3(TestXfroutSessionBase):
         self.xfrsess._request_data = self.mdata
         self.xfrsess._server.get_db_file = lambda : TESTDATA_SRCDIR + \
             'test.sqlite3'
+        self.ns_name = 'a.dns.example.com'
+
+    def check_axfr_stream(self, response):
+        '''Common checks for AXFR(-style) response for the test zone.
+        '''
+        # This zone contains two A RRs for the same name with different TTLs.
+        # These TTLs should be preserved in the AXFR stream.
+        actual_records = response.get_section(Message.SECTION_ANSWER)
+        expected_records = [create_soa(2011112001),
+                            create_ns(self.ns_name),
+                            create_a(Name(self.ns_name), '192.0.2.1', 3600),
+                            create_a(Name(self.ns_name), '192.0.2.2', 7200),
+                            create_soa(2011112001)]
+        self.assertEqual(len(expected_records), len(actual_records))
+        for (expected_rr, actual_rr) in zip(expected_records, actual_records):
+            self.assertTrue(expected_rr, actual_rr)
 
     def test_axfr_normal_session(self):
         XfroutSession._handle(self.xfrsess)
         response = self.sock.read_msg(Message.PRESERVE_ORDER);
         self.assertEqual(Rcode.NOERROR(), response.get_rcode())
-        # This zone contains two A RRs for the same name with different TTLs.
-        # These TTLs should be preseved in the AXFR stream.
-        actual_ttls = []
-        for rr in response.get_section(Message.SECTION_ANSWER):
-            if rr.get_type() == RRType.A() and \
-                    not rr.get_ttl() in actual_ttls:
-                actual_ttls.append(rr.get_ttl().get_value())
-        self.assertEqual([3600, 7200], sorted(actual_ttls))
+        self.check_axfr_stream(response)
+
+    def test_ixfr_to_axfr(self):
+        self.xfrsess._request_data = \
+            self.create_request_data(ixfr=IXFR_NG_VERSION)
+        XfroutSession._handle(self.xfrsess)
+        response = self.sock.read_msg(Message.PRESERVE_ORDER)
+        self.assertEqual(Rcode.NOERROR(), response.get_rcode())
+        # This is an AXFR-style IXFR.  So the question section should indicate
+        # that it's an IXFR response.
+        self.assertEqual(RRType.IXFR(), response.get_question()[0].get_type())
+        self.check_axfr_stream(response)
+
+    def test_ixfr_normal_session(self):
+        # See testdata/creatediff.py.  There are 8 changes between two
+        # versions.  So the answer section should contain all of these and
+        # two beginning and trailing SOAs.
+        self.xfrsess._request_data = \
+            self.create_request_data(ixfr=IXFR_OK_VERSION)
+        XfroutSession._handle(self.xfrsess)
+        response = self.sock.read_msg(Message.PRESERVE_ORDER)
+        actual_records = response.get_section(Message.SECTION_ANSWER)
+        expected_records = [create_soa(2011112001), create_soa(2011111802),
+                            create_soa(2011111900),
+                            create_a(Name(self.ns_name), '192.0.2.2', 7200),
+                            create_soa(2011111900),
+                            create_a(Name(self.ns_name), '192.0.2.53'),
+                            create_aaaa(Name(self.ns_name), '2001:db8::1'),
+                            create_soa(2011112001),
+                            create_a(Name(self.ns_name), '192.0.2.1'),
+                            create_soa(2011112001)]
+        self.assertEqual(len(expected_records), len(actual_records))
+        for (expected_rr, actual_rr) in zip(expected_records, actual_records):
+            self.assertTrue(expected_rr, actual_rr)
+
+    def test_ixfr_soa_only(self):
+        # The requested SOA serial is the latest one.  The response should
+        # contain exactly one SOA of that serial.
+        self.xfrsess._request_data = \
+            self.create_request_data(ixfr=SOA_CURRENT_VERSION)
+        XfroutSession._handle(self.xfrsess)
+        response = self.sock.read_msg(Message.PRESERVE_ORDER)
+        answers = response.get_section(Message.SECTION_ANSWER)
+        self.assertEqual(1, len(answers))
+        self.assertTrue(create_soa(SOA_CURRENT_VERSION), answers[0])
 
 class MyUnixSockServer(UnixSockServer):
     def __init__(self):
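
The expected_records lists in the IXFR tests above follow the RFC 1995
difference-sequence layout: the server's current SOA first, then for each
change a delete section headed by the old SOA and an add section headed by
the new SOA, and finally the current SOA again.  A minimal sketch of that
layout, assuming plain strings instead of the isc.dns RRsets the tests use;
build_ixfr_answer is a hypothetical helper, not part of the patch:

    def build_ixfr_answer(current_serial, diffs):
        # diffs: list of (old_serial, deletions, new_serial, additions)
        # tuples, one per change between consecutive zone versions.
        answer = ['SOA %d' % current_serial]          # beginning SOA
        for old_serial, deletions, new_serial, additions in diffs:
            answer.append('SOA %d' % old_serial)      # start of delete section
            answer.extend(deletions)
            answer.append('SOA %d' % new_serial)      # start of add section
            answer.extend(additions)
        answer.append('SOA %d' % current_serial)      # trailing SOA
        return answer

    # Layout matching test_ixfr_normal_session above (10 records in total):
    layout = build_ixfr_answer(
        2011112001,
        [(2011111802, [], 2011111900, ['A 192.0.2.2']),
         (2011111900, ['A 192.0.2.53', 'AAAA 2001:db8::1'], 2011112001,
          ['A 192.0.2.1'])])
    assert len(layout) == 10
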
diff --git a/src/bin/xfrout/xfrout.py.in b/src/bin/xfrout/xfrout.py.in
index 8c66225..d450138 100755
--- a/src/bin/xfrout/xfrout.py.in
+++ b/src/bin/xfrout/xfrout.py.in
@@ -22,7 +22,7 @@ import isc.cc
 import threading
 import struct
 import signal
-from isc.datasrc import DataSourceClient
+from isc.datasrc import DataSourceClient, ZoneFinder, ZoneJournalReader
 from socketserver import *
 import os
 from isc.config.ccsession import *
@@ -47,7 +47,7 @@ try:
 except ImportError as e:
     # C++ loadable module may not be installed; even so the xfrout process
     # must keep running, so we warn about it and move forward.
-    log.error(XFROUT_IMPORT, str(e))
+    logger.error(XFROUT_IMPORT, str(e))
 
 from isc.acl.acl import ACCEPT, REJECT, DROP, LoaderError
 from isc.acl.dns import REQUEST_LOADER
@@ -93,9 +93,6 @@ init_paths()
 SPECFILE_LOCATION = SPECFILE_PATH + "/xfrout.spec"
 AUTH_SPECFILE_LOCATION = AUTH_SPECFILE_PATH + os.sep + "auth.spec"
 VERBOSE_MODE = False
-# tsig sign every N axfr packets.
-TSIG_SIGN_EVERY_NTH = 96
-
 XFROUT_MAX_MESSAGE_SIZE = 65535
 
 # borrowed from xfrin.py @ #1298.  We should eventually unify it.
@@ -106,7 +103,7 @@ def format_zone_str(zone_name, zone_class):
        zone_name (isc.dns.Name) name to format
        zone_class (isc.dns.RRClass) class to format
     """
-    return zone_name.to_text() + '/' + str(zone_class)
+    return zone_name.to_text(True) + '/' + str(zone_class)
 
 # borrowed from xfrin.py @ #1298.
 def format_addrinfo(addrinfo):
@@ -136,6 +133,11 @@ def get_rrset_len(rrset):
     rrset.to_wire(bytes)
     return len(bytes)
 
+def get_soa_serial(soa_rdata):
+    '''Extract the serial field of an SOA RDATA and return it as an integer.
+    (borrowed from xfrin)
+    '''
+    return int(soa_rdata.to_text().split()[2])
 
 class XfroutSession():
     def __init__(self, sock_fd, request_data, server, tsig_key_ring, remote,
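
The get_soa_serial() helper added above relies on the textual SOA RDATA
layout, in which the serial is the third field (MNAME RNAME SERIAL REFRESH
RETRY EXPIRE MINIMUM).  A standalone illustration, assuming the RDATA is
given as a plain string rather than the isc.dns Rdata object the real code
receives:

    def get_soa_serial_from_text(soa_rdata_text):
        # Fields: MNAME RNAME SERIAL REFRESH RETRY EXPIRE MINIMUM
        return int(soa_rdata_text.split()[2])

    serial = get_soa_serial_from_text(
        'master.example.com. admin.example.com. '
        '2011112001 3600 1800 2419200 7200')
    assert serial == 2011112001
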
@@ -147,11 +149,13 @@ class XfroutSession():
         self._tsig_ctx = None
         self._tsig_len = 0
         self._remote = remote
-        self._request_type = 'AXFR' # could be IXFR when we support it
+        self._request_type = None
+        self._request_typestr = None
         self._acl = default_acl
         self._zone_config = zone_config
         self.ClientClass = client_class # parameterize this for testing
-        self._soa = None # will be set in _check_xfrout_available or in tests
+        self._soa = None # will be set in _xfrout_setup or in tests
+        self._jnl_reader = None # will be set to a reader for IXFR
         self._handle()
 
     def create_tsig_ctx(self, tsig_record, tsig_key_ring):
@@ -199,7 +203,8 @@ class XfroutSession():
         tsig_record = msg.get_tsig_record()
         if tsig_record is not None:
             self._tsig_len = tsig_record.get_length()
-            self._tsig_ctx = self.create_tsig_ctx(tsig_record, self._tsig_key_ring)
+            self._tsig_ctx = self.create_tsig_ctx(tsig_record,
+                                                  self._tsig_key_ring)
             tsig_error = self._tsig_ctx.verify(tsig_record, request_data)
             if tsig_error != TSIGError.NOERROR:
                 return Rcode.NOTAUTH()
@@ -222,35 +227,28 @@ class XfroutSession():
             return rcode, msg
 
         # Make sure the question is valid.  This should be ensured by
-        # the auth server, but since it's far from our xfrout itself,
-        # we check it by ourselves.
+        # the auth server, but since it's far from xfrout itself, we check
+        # it ourselves.  A violation would be an internal bug, so we
+        # raise and stop here rather than returning a FORMERR or SERVFAIL.
         if msg.get_rr_count(Message.SECTION_QUESTION) != 1:
-            logger.debug(DBG_XFROUT_TRACE, XFROUT_XFR_REQUEST_MISSING_QUESTION,
-                         format_addrinfo(self._remote))
-            return Rcode.FORMERR(), msg
-
-        request_type = msg.get_question()[0].get_type()
-        zone_name = msg.get_question()[0].get_name()
-        zone_class = msg.get_question()[0].get_class()
-
-        # If it is an IXFR query, there should be a SOA in the authority
-        # section too
-        if request_type == RRType.IXFR():
-            if msg.get_rr_count(Message.SECTION_AUTHORITY) != 1:
-                logger.debug(DBG_XFROUT_TRACE, XFROUT_IXFR_REQUEST_MISSING_SOA,
-                             format_addrinfo(self._remote),
-                             format_zone_str(zone_name, zone_class))
-                return Rcode.FORMERR(), msg
-            self._request_type = 'IXFR'
-        elif request_type == RRType.AXFR():
-            self._request_type = 'AXFR'
+            raise RuntimeError('Invalid number of questions for XFR: ' +
+                               str(msg.get_rr_count(Message.SECTION_QUESTION)))
+        question = msg.get_question()[0]
+
+        # Identify the request type
+        self._request_type = question.get_type()
+        if self._request_type == RRType.AXFR():
+            self._request_typestr = 'AXFR'
+        elif self._request_type == RRType.IXFR():
+            self._request_typestr = 'IXFR'
         else:
-            logger.error(XFROUT_XFR_REQUEST_BAD_TYPE,
-                         format_zone_str(zone_name, zone_class),
-                         format_addrinfo(self._remote))
-            return Rcode.FORMERR(), msg
+            # Likewise, this should be impossible.
+            raise RuntimeError('Unexpected XFR type: ' +
+                               str(self._request_type))
 
         # ACL checks
+        zone_name = question.get_name()
+        zone_class = question.get_class()
         acl = self._get_transfer_acl(zone_name, zone_class)
         acl_result = acl.execute(
             isc.acl.dns.RequestContext(self._remote[2], msg.get_tsig_record()))
@@ -320,23 +318,33 @@ class XfroutSession():
         msg.set_rcode(rcode_)
         self._send_message(sock_fd, msg, self._tsig_ctx)
 
-    def _check_xfrout_available(self, zone_name):
-        '''Check if xfr request can be responsed.
-           TODO, Get zone's configuration from cfgmgr or some other place
-           eg. check allow_transfer setting,
+    def _get_zone_soa(self, zone_name):
+        '''Retrieve the SOA RR of the given zone.
+
+        It returns a pair of RCODE and the SOA (in the form of RRset).
+        On success RCODE is NOERROR and returned SOA is not None;
+        on failure RCODE indicates the appropriate code in the context of
+        xfr processing, and the returned SOA is None.
 
         '''
+        result, finder = self._datasrc_client.find_zone(zone_name)
+        if result != DataSourceClient.SUCCESS:
+            return (Rcode.NOTAUTH(), None)
+        result, soa_rrset = finder.find(zone_name, RRType.SOA(), None,
+                                        ZoneFinder.FIND_DEFAULT)
+        if result != ZoneFinder.SUCCESS:
+            return (Rcode.SERVFAIL(), None)
+        # Especially for database-based zones, a working zone may be in
+        # a broken state where it has more than one SOA RR.  We proactively
+        # check the condition and abort the xfr attempt if we identify it.
+        if soa_rrset.get_rdata_count() != 1:
+            return (Rcode.SERVFAIL(), None)
+        return (Rcode.NOERROR(), soa_rrset)
+
+    def __axfr_setup(self, zone_name):
+        '''Set up a zone iterator for AXFR or AXFR-style IXFR.
 
-        # Identify the data source for the requested zone and see if it has
-        # SOA while initializing objects used for request processing later.
-        # We should eventually generalize this so that we can choose the
-        # appropriate data source from (possible) multiple candidates.
-        # We should eventually take into account the RR class here.
-        # For now, we  hardcode a particular type (SQLite3-based), and only
-        # consider that one.
-        datasrc_config = '{ "database_file": "' + \
-            self._server.get_db_file() + '"}'
-        self._datasrc_client = self.ClientClass('sqlite3', datasrc_config)
+        '''
         try:
             # Note that we enable 'separate_rrs'.  In xfr-out we need to
             # preserve as many things as possible (even if it's half broken)
@@ -361,6 +369,112 @@ class XfroutSession():
 
         return Rcode.NOERROR()
 
+    def __ixfr_setup(self, request_msg, zone_name, zone_class):
+        '''Set up a zone journal reader for IXFR.
+
+        If the underlying data source does not know the requested range
+        of zone differences, it automatically falls back to AXFR-style
+        IXFR by setting up a zone iterator instead of a journal reader.
+
+        '''
+        # Check the authority section.  Look for a SOA record with
+        # the same name and class as the question.
+        remote_soa = None
+        for auth_rrset in request_msg.get_section(Message.SECTION_AUTHORITY):
+            # Ignore records whose owner name is not the zone apex, and
+            # ignore non-SOA records or records of a different class.
+            if auth_rrset.get_name() != zone_name or \
+                    auth_rrset.get_type() != RRType.SOA() or \
+                    auth_rrset.get_class() != zone_class:
+                continue
+            if auth_rrset.get_rdata_count() != 1:
+                logger.info(XFROUT_IXFR_MULTIPLE_SOA,
+                            format_addrinfo(self._remote))
+                return Rcode.FORMERR()
+            remote_soa = auth_rrset
+        if remote_soa is None:
+            logger.info(XFROUT_IXFR_NO_SOA, format_addrinfo(self._remote))
+            return Rcode.FORMERR()
+
+        # Retrieve the local SOA
+        rcode, self._soa = self._get_zone_soa(zone_name)
+        if rcode != Rcode.NOERROR():
+            return rcode
+
+        # RFC1995 says "If an IXFR query with the same or newer version
+        # number than that of the server is received, it is replied to with
+        # a single SOA record of the server's current version, just as
+        # in AXFR".  The claim about AXFR is incorrect, but other than that,
+        # we do as the RFC says.
+        # Note: until we complete #1278 we can only check equality of the
+        # two serials.  The "newer version" case would fall back to AXFR-style.
+        begin_serial = get_soa_serial(remote_soa.get_rdata()[0])
+        end_serial = get_soa_serial(self._soa.get_rdata()[0])
+        if begin_serial == end_serial:
+            # clear both iterator and jnl_reader to signal we won't do
+            # iteration in response generation
+            self._iterator = None
+            self._jnl_reader = None
+            logger.info(XFROUT_IXFR_UPTODATE, format_addrinfo(self._remote),
+                        format_zone_str(zone_name, zone_class),
+                        begin_serial, end_serial)
+            return Rcode.NOERROR()
+
+        # Set up the journal reader or fall back to AXFR-style IXFR
+        try:
+            code, self._jnl_reader = self._datasrc_client.get_journal_reader(
+                zone_name, begin_serial, end_serial)
+        except isc.datasrc.NotImplemented as ex:
+            # The underlying data source doesn't support journaling.
+            # Fall back to AXFR-style IXFR.
+            logger.info(XFROUT_IXFR_NO_JOURNAL_SUPPORT,
+                        format_addrinfo(self._remote),
+                        format_zone_str(zone_name, zone_class))
+            return self.__axfr_setup(zone_name)
+        if code == ZoneJournalReader.NO_SUCH_VERSION:
+            logger.info(XFROUT_IXFR_NO_VERSION, format_addrinfo(self._remote),
+                        format_zone_str(zone_name, zone_class),
+                        begin_serial, end_serial)
+            return self.__axfr_setup(zone_name)
+        if code == ZoneJournalReader.NO_SUCH_ZONE:
+            # this is quite unexpected as we know the zone's SOA exists.
+            # It might be a bug or the data source is somehow broken,
+            # but it can still happen if someone has removed the zone
+            # between these two operations.  We treat it as NOTAUTH.
+            logger.warn(XFROUT_IXFR_NO_ZONE, format_addrinfo(self._remote),
+                        format_zone_str(zone_name, zone_class))
+            return Rcode.NOTAUTH()
+
+        # Use the reader as the iterator to generate the response.
+        self._iterator = self._jnl_reader
+
+        return Rcode.NOERROR()
+
+    def _xfrout_setup(self, request_msg, zone_name, zone_class):
+        '''Set up a context for xfr responses according to the request type.
+
+        This method identifies the most appropriate data source for the
+        request and sets up a zone iterator or journal reader depending on
+        whether the request is AXFR or IXFR.  If it identifies any
+        protocol-level error it returns an RCODE other than NOERROR.
+
+        '''
+
+        # Identify the data source for the requested zone and see if it has
+        # SOA while initializing objects used for request processing later.
+        # We should eventually generalize this so that we can choose the
+        # appropriate data source from (possible) multiple candidates.
+        # We should eventually take into account the RR class here.
+        # For now, we hardcode a particular type (SQLite3-based), and only
+        # consider that one.
+        datasrc_config = '{ "database_file": "' + \
+            self._server.get_db_file() + '"}'
+        self._datasrc_client = self.ClientClass('sqlite3', datasrc_config)
+
+        if self._request_type == RRType.AXFR():
+            return self.__axfr_setup(zone_name)
+        else:
+            return self.__ixfr_setup(request_msg, zone_name, zone_class)
 
     def dns_xfrout_start(self, sock_fd, msg_query, quota_ok=True):
         rcode_, msg = self._parse_query_message(msg_query)
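
Condensed, the __ixfr_setup() path added above has three happy outcomes:
equal serials produce a single-SOA response, an available journal produces a
true incremental response, and a missing range (or a data source that raises
isc.datasrc.NotImplemented) falls back to AXFR-style IXFR; a vanished zone is
treated as NOTAUTH.  The following sketch only mirrors that branch structure;
choose_ixfr_mode and journal_lookup are hypothetical names, not part of the
patch:

    def choose_ixfr_mode(begin_serial, end_serial, journal_lookup):
        # journal_lookup stands in for DataSourceClient.get_journal_reader();
        # it returns 'ok', 'no_such_version' or 'no_such_zone'.
        if begin_serial == end_serial:
            return 'single-soa'      # client already has the current version
        result = journal_lookup(begin_serial, end_serial)
        if result == 'ok':
            return 'ixfr'            # stream diffs via the journal reader
        if result == 'no_such_version':
            return 'axfr-fallback'   # requested range not in the journal
        return 'notauth'             # zone vanished between the two lookups

    assert choose_ixfr_mode(2011112001, 2011112001,
                            lambda b, e: 'ok') == 'single-soa'
    assert choose_ixfr_mode(2011111801, 2011112001,
                            lambda b, e: 'no_such_version') == 'axfr-fallback'
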
@@ -373,7 +487,7 @@ class XfroutSession():
             return self._reply_query_with_error_rcode(msg, sock_fd,
                                                       Rcode.FORMERR())
         elif not quota_ok:
-            logger.warn(XFROUT_QUERY_QUOTA_EXCCEEDED, self._request_type,
+            logger.warn(XFROUT_QUERY_QUOTA_EXCCEEDED, self._request_typestr,
                         format_addrinfo(self._remote),
                         self._server._max_transfers_out)
             return self._reply_query_with_error_rcode(msg, sock_fd,
@@ -384,27 +498,25 @@ class XfroutSession():
         zone_class = question.get_class()
         zone_str = format_zone_str(zone_name, zone_class) # for logging
 
-        # TODO: we should also include class in the check
         try:
-            rcode_ = self._check_xfrout_available(zone_name)
+            rcode_ = self._xfrout_setup(msg, zone_name, zone_class)
         except Exception as ex:
-            logger.error(XFROUT_XFR_TRANSFER_CHECK_ERROR, self._request_type,
+            logger.error(XFROUT_XFR_TRANSFER_CHECK_ERROR, self._request_typestr,
                          format_addrinfo(self._remote), zone_str, ex)
             rcode_ = Rcode.SERVFAIL()
         if rcode_ != Rcode.NOERROR():
-            logger.info(XFROUT_XFR_TRANSFER_FAILED, self._request_type,
+            logger.info(XFROUT_XFR_TRANSFER_FAILED, self._request_typestr,
                         format_addrinfo(self._remote), zone_str, rcode_)
             return self._reply_query_with_error_rcode(msg, sock_fd, rcode_)
 
         try:
-            logger.info(XFROUT_XFR_TRANSFER_STARTED, self._request_type,
+            logger.info(XFROUT_XFR_TRANSFER_STARTED, self._request_typestr,
                         format_addrinfo(self._remote), zone_str)
             self._reply_xfrout_query(msg, sock_fd)
         except Exception as err:
-            logger.error(XFROUT_XFR_TRANSFER_ERROR, self._request_type,
+            logger.error(XFROUT_XFR_TRANSFER_ERROR, self._request_typestr,
                     format_addrinfo(self._remote), zone_str, err)
-            pass
-        logger.info(XFROUT_XFR_TRANSFER_DONE, self._request_type,
+        logger.info(XFROUT_XFR_TRANSFER_DONE, self._request_typestr,
                     format_addrinfo(self._remote), zone_str)
 
     def _clear_message(self, msg):
@@ -420,44 +532,45 @@ class XfroutSession():
         msg.set_header_flag(Message.HEADERFLAG_QR)
         return msg
 
-    def _send_message_with_last_soa(self, msg, sock_fd, rrset_soa, message_upper_len,
-                                    count_since_last_tsig_sign):
+    def _send_message_with_last_soa(self, msg, sock_fd, rrset_soa,
+                                    message_upper_len):
         '''Add the SOA record to the end of message. If it can't be
         added, a new message should be created to send out the last soa .
         '''
-        rrset_len = get_rrset_len(rrset_soa)
-
-        if (count_since_last_tsig_sign == TSIG_SIGN_EVERY_NTH and
-            message_upper_len + rrset_len >= XFROUT_MAX_MESSAGE_SIZE):
-            # If tsig context exist, sign the packet with serial number TSIG_SIGN_EVERY_NTH
+        if (message_upper_len + self._tsig_len + get_rrset_len(rrset_soa) >=
+            XFROUT_MAX_MESSAGE_SIZE):
             self._send_message(sock_fd, msg, self._tsig_ctx)
             msg = self._clear_message(msg)
-        elif (count_since_last_tsig_sign != TSIG_SIGN_EVERY_NTH and
-              message_upper_len + rrset_len + self._tsig_len >= XFROUT_MAX_MESSAGE_SIZE):
-            self._send_message(sock_fd, msg)
-            msg = self._clear_message(msg)
 
         # If tsig context exist, sign the last packet
         msg.add_rrset(Message.SECTION_ANSWER, rrset_soa)
         self._send_message(sock_fd, msg, self._tsig_ctx)
 
-
     def _reply_xfrout_query(self, msg, sock_fd):
         #TODO, there should be a better way to insert rrset.
-        count_since_last_tsig_sign = TSIG_SIGN_EVERY_NTH
         msg.make_response()
         msg.set_header_flag(Message.HEADERFLAG_AA)
-        msg.add_rrset(Message.SECTION_ANSWER, self._soa)
 
+        # If the iterator is None, we are responding to IXFR with a single
+        # SOA RR.
+        if self._iterator is None:
+            self._send_message_with_last_soa(msg, sock_fd, self._soa, 0)
+            return
+
+        # Add the beginning SOA
+        msg.add_rrset(Message.SECTION_ANSWER, self._soa)
         message_upper_len = get_rrset_len(self._soa) + self._tsig_len
 
+        # Add the rest of the zone/diff contents
         for rrset in self._iterator:
             # Check if xfrout is shutdown
             if  self._server._shutdown_event.is_set():
                 logger.info(XFROUT_STOPPING)
                 return
 
-            if rrset.get_type() == RRType.SOA():
+            # For AXFR (or AXFR-style IXFR), where _jnl_reader is None,
+            # we should skip SOAs from the iterator.
+            if self._jnl_reader is None and rrset.get_type() == RRType.SOA():
                 continue
 
             # We calculate the maximum size of the RRset (i.e. the
@@ -469,27 +582,18 @@ class XfroutSession():
                 message_upper_len += rrset_len
                 continue
 
-            # If tsig context exist, sign every N packets
-            if count_since_last_tsig_sign == TSIG_SIGN_EVERY_NTH:
-                count_since_last_tsig_sign = 0
-                self._send_message(sock_fd, msg, self._tsig_ctx)
-            else:
-                self._send_message(sock_fd, msg)
+            self._send_message(sock_fd, msg, self._tsig_ctx)
 
-            count_since_last_tsig_sign += 1
             msg = self._clear_message(msg)
             # Add the RRset to the new message
             msg.add_rrset(Message.SECTION_ANSWER, rrset)
 
             # Reserve tsig space for signed packet
-            if count_since_last_tsig_sign == TSIG_SIGN_EVERY_NTH:
-                message_upper_len = rrset_len + self._tsig_len
-            else:
-                message_upper_len = rrset_len
+            message_upper_len = rrset_len + self._tsig_len
 
+        # Add and send the trailing SOA
         self._send_message_with_last_soa(msg, sock_fd, self._soa,
-                                         message_upper_len,
-                                         count_since_last_tsig_sign)
+                                         message_upper_len)
 
 class UnixSockServer(socketserver_mixin.NoPollMixIn,
                      ThreadingUnixStreamServer):
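
The revised _reply_xfrout_query() above keeps a running upper bound on the
message size: TSIG space is reserved in every message, each RRset's wire
length is added as it is appended, and the message is flushed once the next
RRset would reach XFROUT_MAX_MESSAGE_SIZE.  A simplified sketch of just that
packing loop, with integers standing in for RRsets and pack_rrsets as a
hypothetical name:

    XFROUT_MAX_MESSAGE_SIZE = 65535

    def pack_rrsets(rrset_lengths, tsig_len=0):
        # Group RRset wire lengths into messages that stay under the limit,
        # reserving tsig_len octets in every message for the TSIG RR.
        messages = [[]]
        upper_len = tsig_len
        for rrset_len in rrset_lengths:
            if upper_len + rrset_len < XFROUT_MAX_MESSAGE_SIZE:
                messages[-1].append(rrset_len)
                upper_len += rrset_len
            else:
                messages.append([rrset_len])    # flush, start a new message
                upper_len = rrset_len + tsig_len
        return messages

    # Three RRsets of 40000 octets each cannot share a message:
    assert [len(m) for m in pack_rrsets([40000, 40000, 40000],
                                        tsig_len=100)] == [1, 1, 1]
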
@@ -563,7 +667,7 @@ class UnixSockServer(socketserver_mixin.NoPollMixIn,
             try:
                 self.process_request(request)
             except Exception as pre:
-                log.error(XFROUT_PROCESS_REQUEST_ERROR, str(pre))
+                logger.error(XFROUT_PROCESS_REQUEST_ERROR, str(pre))
                 break
 
     def _handle_request_noblock(self):
diff --git a/src/bin/xfrout/xfrout_messages.mes b/src/bin/xfrout/xfrout_messages.mes
index 49662e6..fcc2e59 100644
--- a/src/bin/xfrout/xfrout_messages.mes
+++ b/src/bin/xfrout/xfrout_messages.mes
@@ -56,13 +56,6 @@ are missing on the system, or the PYTHONPATH variable is not correct.
 The specific place where this library needs to be depends on your
 system and your specific installation.
 
-% XFROUT_IXFR_REQUEST_MISSING_SOA IXFR client %1: request packet for %2 did not contain SOA RR
-An IXFR request arrived, but it did not contain a SOA RR in its authority
-section. The xfrout daemon will respond with a FORMERR.
-
-% XFROUT_IXFR_TRANSFER_STARTED %1 client %2: IXFR transfer of zone %3 has started
-An incremental transfer out of the given zone has started.
-
 % XFROUT_NEW_CONFIG Update xfrout configuration
 New configuration settings have been sent from the configuration
 manager. The xfrout daemon will now apply them.
@@ -154,16 +147,6 @@ on, but the file is in use. The most likely cause is that another
 xfrout daemon process is still running. This xfrout daemon (the one
 printing this message) will not start.
 
-% XFROUT_XFR_REQUEST_BAD_TYPE bad question type in transfer request for %1 from %2
-A transfer request for the given zone arrived, but the RR in the question
-section was not of type AXFR or IXFR. This request should not even have
-reached the xfrout daemon, and there appears to be a problem in the module
-that passed it on, please file a bug report if this error is encountered.
-
-% XFROUT_XFR_REQUEST_MISSING_QUESTION empty question section in transfer request from %1
-A transfer request from the given client did not contain a question section.
-The xfrout daemon will respond with a FORMERR.
-
 % XFROUT_XFR_TRANSFER_DONE %1 client %2: transfer of %3 complete
 The transfer of the given zone has been completed successfully, or was
 aborted due to a shutdown event.
@@ -194,3 +177,42 @@ Xfrout/max_transfers_out, has been reached).
 
 % XFROUT_XFR_TRANSFER_STARTED %1 client %2: transfer of zone %3 has started
 A transfer out of the given zone has started.
+
+% XFROUT_IXFR_MULTIPLE_SOA IXFR client %1: authority section has multiple SOAs
+An IXFR request was received with more than one SOA RR in the authority
+section.  The xfrout daemon rejects the request with an RCODE of
+FORMERR.
+
+% XFROUT_IXFR_NO_SOA IXFR client %1: missing SOA
+An IXFR request was received with no SOA RR in the authority section.
+The xfrout daemon rejects the request with an RCODE of FORMERR.
+
+% XFROUT_IXFR_NO_JOURNAL_SUPPORT IXFR client %1, %2: journaling not supported in the data source, falling back to AXFR
+An IXFR request was received but the underlying data source did
+not support journaling.  The xfrout daemon fell back to AXFR-style
+IXFR.
+
+% XFROUT_IXFR_UPTODATE IXFR client %1, %2: client version is new enough (theirs=%3, ours=%4)
+An IXFR request was received, but the client's SOA version is the same as
+or newer than that of the server.  The xfrout server responds to the
+request with the answer section being just one SOA of that version.
+Note: as of this writing the 'newer version' cannot be identified due to
+the lack of support for serial number arithmetic.  This will soon
+be implemented.
+
+% XFROUT_IXFR_NO_VERSION IXFR client %1, %2: version (%3 to %4) not in journal, falling back to AXFR
+An IXFR request was received, but the requested range of differences
+was not found in the data source.  The xfrout daemon fell back to
+AXFR-style IXFR.
+
+% XFROUT_IXFR_NO_ZONE IXFR client %1, %2: zone not found with journal
+The requested zone in IXFR was not found in the data source
+even though the xfrout daemon successfully found the SOA RR of the zone
+in the data source.  This can happen if the administrator removed the
+zone from the data source in the short interval between these two
+operations, but it is more likely to be a bug or a broken data source.
+Unless you know why this message was logged, and especially if it
+happens often, it's advisable to check whether the data source is
+valid for this zone.  The xfrout daemon considers it a possible,
+though unlikely, event, and returns a response with an RCODE of
+NOTAUTH.
diff --git a/src/lib/datasrc/client.h b/src/lib/datasrc/client.h
index 18d04f7..24c8850 100644
--- a/src/lib/datasrc/client.h
+++ b/src/lib/datasrc/client.h
@@ -15,6 +15,8 @@
 #ifndef __DATA_SOURCE_CLIENT_H
 #define __DATA_SOURCE_CLIENT_H 1
 
+#include <utility>
+
 #include <boost/noncopyable.hpp>
 #include <boost/shared_ptr.hpp>
 
@@ -311,6 +313,55 @@ public:
     virtual ZoneUpdaterPtr getUpdater(const isc::dns::Name& name,
                                       bool replace, bool journaling = false)
         const = 0;
+
+    /// Return a journal reader to retrieve differences of a zone.
+    ///
+    /// A derived version of this method creates a concrete
+    /// \c ZoneJournalReader object specific to the underlying data source
+    /// for the specified name of zone and differences between the versions
+    /// specified by the beginning and ending serials of the corresponding
+    /// SOA RRs.
+    /// The RR class of the zone is the one that the client is expected to
+    /// handle (see the detailed description of this class).
+    ///
+    /// Note that the SOA serials are compared by the semantics of the serial
+    /// number arithmetic.  So, for example, \c begin_serial can be larger than
+    /// \c end_serial as bare unsigned integers.  The underlying data source
+    /// implementation is assumed to keep track of sufficient history to
+    /// identify (if it exists) the corresponding difference between the
+    /// specified
+    /// versions.
+    ///
+    /// This method returns the result as a pair of a result code and
+    /// a pointer to a \c ZoneJournalReader object.  On success, the result
+    /// code is \c SUCCESS and the pointer must be non-NULL; otherwise
+    /// the result code is something other than \c SUCCESS and the pointer
+    /// must be NULL.
+    ///
+    /// If the specified zone is not found in the data source, the result
+    /// code is \c NO_SUCH_ZONE.
+    /// Otherwise, if the specified range of differences for the zone is not
+    /// found
+    /// in the data source, the result code is \c NO_SUCH_VERSION.
+    ///
+    /// Handling differences is an optional feature of a data source.
+    /// If the underlying data source does not support difference handling,
+    /// this method for that type of data source can throw an exception of
+    /// class \c NotImplemented.
+    ///
+    /// \exception NotImplemented The data source does not support differences.
+    /// \exception DataSourceError Other operational errors at the data source
+    /// level.
+    ///
+    /// \param zone The name of the zone for which the difference should be
+    /// retrieved.
+    /// \param begin_serial The SOA serial of the beginning version of the
+    /// differences.
+    /// \param end_serial The SOA serial of the ending version of the
+    /// differences.
+    ///
+    /// \return A pair of result code and a pointer to \c ZoneJournalReader.
+    virtual std::pair<ZoneJournalReader::Result, ZoneJournalReaderPtr>
+    getJournalReader(const isc::dns::Name& zone, uint32_t begin_serial,
+                     uint32_t end_serial) const = 0;
 };
 }
 }
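
On the Python side (as used by xfrout above) this interface is exposed as
DataSourceClient.get_journal_reader(), returning the same (result, reader)
pair, and the reader is iterated to obtain the difference RRsets.  A hedged
usage sketch under that assumption; dump_diffs is a hypothetical helper, and
data sources without journaling support raise isc.datasrc.NotImplemented
instead:

    from isc.datasrc import ZoneJournalReader
    from isc.dns import Name

    def dump_diffs(client, zone_name, begin_serial, end_serial):
        # 'client' is assumed to be an already-configured DataSourceClient,
        # e.g. DataSourceClient('sqlite3', '{"database_file": "zones.sqlite3"}')
        code, reader = client.get_journal_reader(Name(zone_name),
                                                 begin_serial, end_serial)
        if code == ZoneJournalReader.NO_SUCH_ZONE:
            raise ValueError('zone not found: ' + zone_name)
        if code == ZoneJournalReader.NO_SUCH_VERSION:
            raise ValueError('difference range not stored in the journal')
        # On SUCCESS the reader is iterable and yields the difference RRsets
        # in order, starting and ending with the corresponding SOA records.
        for rrset in reader:
            print(rrset.to_text())
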
diff --git a/src/lib/datasrc/database.cc b/src/lib/datasrc/database.cc
index 0e867ba..1bf93fc 100644
--- a/src/lib/datasrc/database.cc
+++ b/src/lib/datasrc/database.cc
@@ -13,6 +13,7 @@
 // PERFORMANCE OF THIS SOFTWARE.
 
 #include <string>
+#include <utility>
 #include <vector>
 
 #include <datasrc/database.h>
@@ -1069,5 +1070,105 @@ DatabaseClient::getUpdater(const isc::dns::Name& name, bool replace,
     return (ZoneUpdaterPtr(new DatabaseUpdater(update_accessor, zone.second,
                                                name, rrclass_, journaling)));
 }
+
+//
+// Zone journal reader using some database system as the underlying data
+// source.
+//
+class DatabaseJournalReader : public ZoneJournalReader {
+private:
+    // A shortcut typedef to keep the code concise.
+    typedef DatabaseAccessor Accessor;
+public:
+    DatabaseJournalReader(shared_ptr<Accessor> accessor, const Name& zone,
+                          int zone_id, const RRClass& rrclass, uint32_t begin,
+                          uint32_t end) :
+        accessor_(accessor), zone_(zone), rrclass_(rrclass),
+        begin_(begin), end_(end), finished_(false)
+    {
+        context_ = accessor_->getDiffs(zone_id, begin, end);
+    }
+    virtual ~DatabaseJournalReader() {}
+    virtual ConstRRsetPtr getNextDiff() {
+        if (finished_) {
+            isc_throw(InvalidOperation,
+                      "Diff read attempt past the end of sequence on "
+                      << accessor_->getDBName());
+        }
+
+        string data[Accessor::COLUMN_COUNT];
+        if (!context_->getNext(data)) {
+            finished_ = true;
+            LOG_DEBUG(logger, DBG_TRACE_BASIC,
+                      DATASRC_DATABASE_JOURNALREADER_END).
+                arg(zone_).arg(rrclass_).arg(accessor_->getDBName()).
+                arg(begin_).arg(end_);
+            return (ConstRRsetPtr());
+        }
+
+        try {
+            RRsetPtr rrset(new RRset(Name(data[Accessor::NAME_COLUMN]),
+                                     rrclass_,
+                                     RRType(data[Accessor::TYPE_COLUMN]),
+                                     RRTTL(data[Accessor::TTL_COLUMN])));
+            rrset->addRdata(rdata::createRdata(rrset->getType(), rrclass_,
+                                               data[Accessor::RDATA_COLUMN]));
+            LOG_DEBUG(logger, DBG_TRACE_DETAILED,
+                      DATASRC_DATABASE_JOURNALREADER_NEXT).
+                arg(rrset->getName()).arg(rrset->getType()).
+                arg(zone_).arg(rrclass_).arg(accessor_->getDBName());
+            return (rrset);
+        } catch (const Exception& ex) {
+            LOG_ERROR(logger, DATASRC_DATABASE_JOURNALREADR_BADDATA).
+                arg(zone_).arg(rrclass_).arg(accessor_->getDBName()).
+                arg(begin_).arg(end_).arg(ex.what());
+            isc_throw(DataSourceError, "Failed to create RRset from diff on "
+                      << accessor_->getDBName());
+        }
+    }
+
+private:
+    shared_ptr<Accessor> accessor_;
+    const Name zone_;
+    const RRClass rrclass_;
+    Accessor::IteratorContextPtr context_;
+    const uint32_t begin_;
+    const uint32_t end_;
+    bool finished_;
+};
+
+// The JournalReader factory
+pair<ZoneJournalReader::Result, ZoneJournalReaderPtr>
+DatabaseClient::getJournalReader(const isc::dns::Name& zone,
+                                 uint32_t begin_serial,
+                                 uint32_t end_serial) const
+{
+    shared_ptr<DatabaseAccessor> jnl_accessor(accessor_->clone());
+    const pair<bool, int> zoneinfo(jnl_accessor->getZone(zone.toText()));
+    if (!zoneinfo.first) {
+        return (pair<ZoneJournalReader::Result, ZoneJournalReaderPtr>(
+                    ZoneJournalReader::NO_SUCH_ZONE,
+                    ZoneJournalReaderPtr()));
+    }
+
+    try {
+        const pair<ZoneJournalReader::Result, ZoneJournalReaderPtr> ret(
+            ZoneJournalReader::SUCCESS,
+            ZoneJournalReaderPtr(new DatabaseJournalReader(jnl_accessor,
+                                                           zone,
+                                                           zoneinfo.second,
+                                                           rrclass_,
+                                                           begin_serial,
+                                                           end_serial)));
+        LOG_DEBUG(logger, DBG_TRACE_BASIC,
+                  DATASRC_DATABASE_JOURNALREADER_START).arg(zone).arg(rrclass_).
+            arg(jnl_accessor->getDBName()).arg(begin_serial).arg(end_serial);
+        return (ret);
+    } catch (const NoSuchSerial&) {
+        return (pair<ZoneJournalReader::Result, ZoneJournalReaderPtr>(
+                    ZoneJournalReader::NO_SUCH_VERSION,
+                    ZoneJournalReaderPtr()));
+    }
+}
 }
 }
diff --git a/src/lib/datasrc/database.h b/src/lib/datasrc/database.h
index 2ac10e7..81e6241 100644
--- a/src/lib/datasrc/database.h
+++ b/src/lib/datasrc/database.h
@@ -23,6 +23,8 @@
 #include <dns/rrclass.h>
 #include <dns/rrset.h>
 
+#include <datasrc/data_source.h>
+#include <datasrc/client.h>
 #include <datasrc/client.h>
 
 #include <dns/name.h>
@@ -544,12 +546,10 @@ public:
     /// is not for the SOA RR; it passes TTL for a diff that deletes an RR
     /// while in \c deleteRecordInZone() it's omitted.  This is because
     /// the stored diffs are expected to be retrieved in the form that
-    /// \c getRecordDiffs() is expected to meet.  This means if the caller
+    /// \c getDiffs() is expected to meet.  This means if the caller
     /// wants to use this method with other update operations, it must
     /// ensure the additional information is ready when this method is called.
     ///
-    /// \note \c getRecordDiffs() is not yet implemented.
-    ///
     /// The caller of this method must ensure that the added diffs via
     /// this method in a single transaction form an IXFR-style difference
     /// sequences: Each difference sequence is a sequence of RRs:
@@ -562,7 +562,7 @@ public:
     /// an SOA RR, \c serial must be identical to the serial of that SOA).
     /// The underlying derived class implementation may or may not check
     /// this condition, but if the caller doesn't meet the condition
-    /// a subsequent call to \c getRecordDiffs() will not work as expected.
+    /// a subsequent call to \c getDiffs() will not work as expected.
     ///
     /// Any call to this method must be in a transaction, and, for now,
     /// it must be a transaction triggered by \c startUpdateZone() (that is,
@@ -932,6 +932,15 @@ public:
                                       bool replace,
                                       bool journaling = false) const;
 
+
+    /// This implementation internally clones the accessor from the one
+    /// used in the client for retrieving diffs and iterating over them.
+    /// The returned reader object will be able to work separately from
+    /// the original client.
+    virtual std::pair<ZoneJournalReader::Result, ZoneJournalReaderPtr>
+    getJournalReader(const isc::dns::Name& zone, uint32_t begin_serial,
+                     uint32_t end_serial) const;
+
 private:
     /// \brief The RR class that this client handles.
     const isc::dns::RRClass rrclass_;
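
The addRecordDiff() documentation above leaves the IXFR-style ordering to the
caller, but in practice that ordering falls out naturally when diffs are stored
through a journaling updater.  A minimal Python sketch of storing one such
sequence via the bindings added later in this commit; the exact Python
signature of get_updater() (in particular its journaling argument) is assumed
here to mirror the C++ one, so treat it as illustrative rather than
authoritative:

    import isc.datasrc
    import isc.dns

    def add_soa_bump_diff(client, zone_name, old_soa, new_soa):
        # zone_name is an isc.dns.Name; old_soa/new_soa are single-RDATA SOA
        # RRsets.  With journaling enabled, the updater records the deletions
        # and additions as one IXFR-style difference sequence underneath.
        updater = client.get_updater(zone_name, False, True)
        updater.delete_rrset(old_soa)   # the old SOA opens the sequence
        updater.add_rrset(new_soa)      # the new SOA starts the "add" part
        updater.commit()
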
diff --git a/src/lib/datasrc/datasrc_messages.mes b/src/lib/datasrc/datasrc_messages.mes
index 04ad610..b4d0df7 100644
--- a/src/lib/datasrc/datasrc_messages.mes
+++ b/src/lib/datasrc/datasrc_messages.mes
@@ -630,3 +630,31 @@ database module are shown in the log message.
 Debug information.  A set of updates to a zone has been successfully
 committed to the corresponding database backend.  The zone name,
 its class and the database name are printed.
+
+% DATASRC_DATABASE_JOURNALREADER_START %1/%2 on %3 from %4 to %5
+This is a debug message indicating that the program starts reading
+a zone's difference sequences from a database-based data source.  The
+zone's name and class, database name, and the start and end serials
+are shown in the message.
+
+% DATASRC_DATABASE_JOURNALREADER_NEXT %1/%2 in %3/%4 on %5
+This is a debug message indicating that the program retrieves one
+difference from a zone's difference sequences and successfully converts
+it to an RRset.  The zone's name and class, database name, and the
+name and RR type of the retrieved diff are shown in the message.
+
+% DATASRC_DATABASE_JOURNALREADER_END %1/%2 on %3 from %4 to %5
+This is a debug message indicating that the program (successfully)
+reaches the end of a zone's difference sequences.  The zone's name
+and class, database name, and the start and end serials are shown in
+the message.
+
+% DATASRC_DATABASE_JOURNALREADER_BADDATA failed to convert a diff to RRset in %1/%2 on %3 between %4 and %5: %6
+This is an error message indicating that a zone's diff is broken and
+the data source library failed to convert it to a valid RRset.  The
+most likely cause of this is that someone has manually modified the
+zone's diff in the database and inserted invalid data as a result.
+The zone's name and class, the database name, the start and end
+serials, and additional detail of the error are shown in the
+message.  The administrator should examine the diff in the database
+to find any invalid data and fix it.
diff --git a/src/lib/datasrc/memory_datasrc.cc b/src/lib/datasrc/memory_datasrc.cc
index 1bf17f6..a79ee5b 100644
--- a/src/lib/datasrc/memory_datasrc.cc
+++ b/src/lib/datasrc/memory_datasrc.cc
@@ -850,6 +850,13 @@ InMemoryClient::getUpdater(const isc::dns::Name&, bool, bool) const {
     isc_throw(isc::NotImplemented, "Update attempt on in memory data source");
 }
 
+pair<ZoneJournalReader::Result, ZoneJournalReaderPtr>
+InMemoryClient::getJournalReader(const isc::dns::Name&, uint32_t,
+                                 uint32_t) const
+{
+    isc_throw(isc::NotImplemented, "Journaling isn't supported for "
+              "in memory data source");
+}
 
 namespace {
 // convenience function to add an error message to a list of those
diff --git a/src/lib/datasrc/memory_datasrc.h b/src/lib/datasrc/memory_datasrc.h
index a9764fe..b852eb3 100644
--- a/src/lib/datasrc/memory_datasrc.h
+++ b/src/lib/datasrc/memory_datasrc.h
@@ -287,6 +287,10 @@ public:
                                       bool replace, bool journaling = false)
         const;
 
+    virtual std::pair<ZoneJournalReader::Result, ZoneJournalReaderPtr>
+    getJournalReader(const isc::dns::Name& zone, uint32_t begin_serial,
+                     uint32_t end_serial) const;
+
 private:
     // TODO: Do we still need the PImpl if nobody should manipulate this class
     // directly any more (it should be handled through DataSourceClient)?
diff --git a/src/lib/datasrc/sqlite3_accessor.h b/src/lib/datasrc/sqlite3_accessor.h
index 250b46a..08be824 100644
--- a/src/lib/datasrc/sqlite3_accessor.h
+++ b/src/lib/datasrc/sqlite3_accessor.h
@@ -71,7 +71,6 @@ public:
         DataSourceError(file, line, what) {}
 };
 
-
 struct SQLite3Parameters;
 
 /**
diff --git a/src/lib/datasrc/tests/client_unittest.cc b/src/lib/datasrc/tests/client_unittest.cc
index ade6fc7..64ad25f 100644
--- a/src/lib/datasrc/tests/client_unittest.cc
+++ b/src/lib/datasrc/tests/client_unittest.cc
@@ -12,6 +12,8 @@
 // OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 // PERFORMANCE OF THIS SOFTWARE.
 
+#include <utility>
+
 #include <datasrc/client.h>
 
 #include <dns/name.h>
@@ -37,6 +39,11 @@ public:
     {
         return (ZoneUpdaterPtr());
     }
+    virtual std::pair<ZoneJournalReader::Result, ZoneJournalReaderPtr>
+    getJournalReader(const isc::dns::Name&, uint32_t, uint32_t) const {
+        isc_throw(isc::NotImplemented, "Journaling isn't supported "
+                  "in Nop data source");
+    }
 };
 
 class ClientTest : public ::testing::Test {
diff --git a/src/lib/datasrc/tests/database_unittest.cc b/src/lib/datasrc/tests/database_unittest.cc
index 007c634..920c9a2 100644
--- a/src/lib/datasrc/tests/database_unittest.cc
+++ b/src/lib/datasrc/tests/database_unittest.cc
@@ -12,11 +12,15 @@
 // OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 // PERFORMANCE OF THIS SOFTWARE.
 
+#include <stdlib.h>
+
 #include <boost/shared_ptr.hpp>
 #include <boost/lexical_cast.hpp>
 
 #include <gtest/gtest.h>
 
+#include <exceptions/exceptions.h>
+
 #include <dns/name.h>
 #include <dns/rrttl.h>
 #include <dns/rrset.h>
@@ -31,6 +35,7 @@
 #include <testutils/dnsmessage_test.h>
 
 #include <map>
+#include <vector>
 
 using namespace isc::datasrc;
 using namespace std;
@@ -259,7 +264,7 @@ public:
 
     virtual IteratorContextPtr getDiffs(int, uint32_t, uint32_t) const {
         isc_throw(isc::NotImplemented,
-                  "This database datasource can't be iterated");
+                  "This database datasource doesn't support diffs");
     }
 
     virtual std::string findPreviousName(int, const std::string&) const {
@@ -550,6 +555,29 @@ private:
             }
         }
     };
+    class MockDiffIteratorContext : public IteratorContext {
+        const vector<JournalEntry> diffs_;
+        vector<JournalEntry>::const_iterator it_;
+    public:
+        MockDiffIteratorContext(const vector<JournalEntry>& diffs) :
+            diffs_(diffs), it_(diffs_.begin())
+        {}
+        virtual bool getNext(string (&data)[COLUMN_COUNT]) {
+            if (it_ == diffs_.end()) {
+                return (false);
+            }
+            data[DatabaseAccessor::NAME_COLUMN] =
+                (*it_).data_[DatabaseAccessor::DIFF_NAME];
+            data[DatabaseAccessor::TYPE_COLUMN] =
+                (*it_).data_[DatabaseAccessor::DIFF_TYPE];
+            data[DatabaseAccessor::TTL_COLUMN] =
+                (*it_).data_[DatabaseAccessor::DIFF_TTL];
+            data[DatabaseAccessor::RDATA_COLUMN] =
+                (*it_).data_[DatabaseAccessor::DIFF_RDATA];
+            ++it_;
+            return (true);
+        }
+    };
 public:
     virtual IteratorContextPtr getAllRecords(int id) const {
         if (id == READONLY_ZONE_ID) {
@@ -729,12 +757,52 @@ public:
             isc_throw(DataSourceError, "Test error");
         } else {
             journal_entries_->push_back(JournalEntry(id, serial, operation,
-                                                    data));
+                                                     data));
+        }
+    }
+
+    virtual IteratorContextPtr getDiffs(int id, uint32_t start,
+                                        uint32_t end) const
+    {
+        vector<JournalEntry> selected_jnl;
+
+        for (vector<JournalEntry>::const_iterator it =
+                 journal_entries_->begin();
+             it != journal_entries_->end(); ++it)
+        {
+            // For simplicity we assume this method is called for the
+            // "readonly" zone possibly after making updates on the "writable"
+            // copy and committing them.
+            if (id != READONLY_ZONE_ID) {
+                continue;
+            }
+
+            // Note: the following logic is not 100% accurate in terms of
+            // serial number arithmetic; we prefer brevity for testing.
+            // Skip until we see the starting serial.  Once we have started
+            // recording, this condition is ignored (to support the
+            // wrap-around case).  Also, it ignores the RR type; it only
+            // checks the versions.
+            if ((*it).serial_ < start && selected_jnl.empty()) {
+                continue;
+            }
+            if ((*it).serial_ > end) { // gone over the end serial. we're done.
+                break;
+            }
+            selected_jnl.push_back(*it);
         }
+
+        // Check if we've found the requested range.  If not, throw.
+        if (selected_jnl.empty() || selected_jnl.front().serial_ != start ||
+            selected_jnl.back().serial_ != end) {
+            isc_throw(NoSuchSerial, "requested diff range is not found");
+        }
+
+        return (IteratorContextPtr(new MockDiffIteratorContext(selected_jnl)));
     }
 
     // Check the journal is as expected and clear the journal
-    void checkJournal(const std::vector<JournalEntry> &expected) {
+    void checkJournal(const std::vector<JournalEntry> &expected) const {
         std::vector<JournalEntry> journal;
         // Clean the journal, but keep local copy to check
         journal.swap(*journal_entries_);
@@ -903,6 +971,24 @@ public:
      * times per test.
      */
     void createClient() {
+        // To make sure we always have an empty diffs table at the beginning
+        // of each test, we re-install the writable data source here.
+        // Note: this is SQLite3 specific and a waste (though otherwise
+        // harmless) for other types of data sources.  If and when we support
+        // more types of data sources in this test framework, we should
+        // probably move this to some specialized templated method specific
+        // to SQLite3 (or, in the longer term, add an API to purge the
+        // diffs table).
+        const char* const install_cmd = INSTALL_PROG " " TEST_DATA_DIR
+            "/rwtest.sqlite3 " TEST_DATA_BUILDDIR
+            "/rwtest.sqlite3.copied";
+        if (system(install_cmd) != 0) {
+            // Any exception will do; this is a failure in test setup,
+            // but nice to show the failing command; it shouldn't be caught
+            isc_throw(isc::Exception,
+                      "Error setting up; command failed: " << install_cmd);
+        }
+
         current_accessor_ = new ACCESSOR_TYPE();
         is_mock_ = (dynamic_cast<MockAccessor*>(current_accessor_) != NULL);
         client_.reset(new DatabaseClient(qclass_,
@@ -968,6 +1054,48 @@ public:
         }
     }
 
+    void checkJournal(const vector<JournalEntry>& expected) {
+        if (is_mock_) {
+            const MockAccessor* mock_accessor =
+                dynamic_cast<const MockAccessor*>(current_accessor_);
+            mock_accessor->checkJournal(expected);
+        } else {
+            // For other generic databases, retrieve the diff using the
+            // reader class and compare the resulting sequence of RRsets.
+            // For simplicity we only consider the case where the expected
+            // sequence is not empty.
+            ASSERT_FALSE(expected.empty());
+            const Name zone_name(expected.front().
+                                 data_[DatabaseAccessor::DIFF_NAME]);
+            ZoneJournalReaderPtr jnl_reader =
+                client_->getJournalReader(zone_name,
+                                          expected.front().serial_,
+                                          expected.back().serial_).second;
+            ASSERT_TRUE(jnl_reader);
+            ConstRRsetPtr rrset;
+            vector<JournalEntry>::const_iterator it = expected.begin();
+            for (rrset = jnl_reader->getNextDiff();
+                 rrset && it != expected.end();
+                 rrset = jnl_reader->getNextDiff(), ++it) {
+                typedef DatabaseAccessor Accessor;
+                RRsetPtr expected_rrset(
+                    new RRset(Name((*it).data_[Accessor::DIFF_NAME]),
+                              qclass_,
+                              RRType((*it).data_[Accessor::DIFF_TYPE]),
+                              RRTTL((*it).data_[Accessor::DIFF_TTL])));
+                expected_rrset->addRdata(
+                    rdata::createRdata(expected_rrset->getType(),
+                                       expected_rrset->getClass(),
+                                       (*it).data_[Accessor::DIFF_RDATA]));
+                isc::testutils::rrsetCheck(expected_rrset, rrset);
+            }
+            // We should have examined all entries of both expected and
+            // actual data.
+            EXPECT_TRUE(it == expected.end());
+            ASSERT_FALSE(rrset);
+        }
+    }
+
     // Some tests only work for MockAccessor.  We remember whether our accessor
     // is of that type.
     bool is_mock_;
@@ -2800,17 +2928,20 @@ TEST_F(MockDatabaseClientTest, badName) {
 /*
  * Test correct use of the updater with a journal.
  */
-TEST_F(MockDatabaseClientTest, journal) {
-    updater_ = client_->getUpdater(zname_, false, true);
-    updater_->deleteRRset(*soa_);
-    updater_->deleteRRset(*rrset_);
-    soa_.reset(new RRset(zname_, qclass_, RRType::SOA(), rrttl_));
-    soa_->addRdata(rdata::createRdata(soa_->getType(), soa_->getClass(),
-                                      "ns1.example.org. admin.example.org. "
-                                      "1235 3600 1800 2419200 7200"));
-    updater_->addRRset(*soa_);
-    updater_->addRRset(*rrset_);
-    ASSERT_NO_THROW(updater_->commit());
+TYPED_TEST(DatabaseClientTest, journal) {
+    this->updater_ = this->client_->getUpdater(this->zname_, false, true);
+    this->updater_->deleteRRset(*this->soa_);
+    this->updater_->deleteRRset(*this->rrset_);
+    this->soa_.reset(new RRset(this->zname_, this->qclass_, RRType::SOA(),
+                               this->rrttl_));
+    this->soa_->addRdata(rdata::createRdata(this->soa_->getType(),
+                                            this->soa_->getClass(),
+                                            "ns1.example.org. "
+                                            "admin.example.org. "
+                                            "1235 3600 1800 2419200 7200"));
+    this->updater_->addRRset(*this->soa_);
+    this->updater_->addRRset(*this->rrset_);
+    ASSERT_NO_THROW(this->updater_->commit());
     std::vector<JournalEntry> expected;
     expected.push_back(JournalEntry(WRITABLE_ZONE_ID, 1234,
                                     DatabaseAccessor::DIFF_DELETE,
@@ -2830,21 +2961,21 @@ TEST_F(MockDatabaseClientTest, journal) {
                                     DatabaseAccessor::DIFF_ADD,
                                     "www.example.org.", "A", "3600",
                                     "192.0.2.2"));
-    current_accessor_->checkJournal(expected);
+    this->checkJournal(expected);
 }
 
 /*
  * Push multiple delete-add sequences. Checks it is allowed and all is
  * saved.
  */
-TEST_F(MockDatabaseClientTest, journalMultiple) {
+TYPED_TEST(DatabaseClientTest, journalMultiple) {
     std::vector<JournalEntry> expected;
-    updater_ = client_->getUpdater(zname_, false, true);
+    this->updater_ = this->client_->getUpdater(this->zname_, false, true);
     std::string soa_rdata = "ns1.example.org. admin.example.org. "
         "1234 3600 1800 2419200 7200";
     for (size_t i(1); i < 100; ++ i) {
         // Remove the old SOA
-        updater_->deleteRRset(*soa_);
+        this->updater_->deleteRRset(*this->soa_);
         expected.push_back(JournalEntry(WRITABLE_ZONE_ID, 1234 + i - 1,
                                         DatabaseAccessor::DIFF_DELETE,
                                         "example.org.", "SOA", "3600",
@@ -2852,19 +2983,21 @@ TEST_F(MockDatabaseClientTest, journalMultiple) {
         // Create a new SOA
         soa_rdata = "ns1.example.org. admin.example.org. " +
             lexical_cast<std::string>(1234 + i) + " 3600 1800 2419200 7200";
-        soa_.reset(new RRset(zname_, qclass_, RRType::SOA(), rrttl_));
-        soa_->addRdata(rdata::createRdata(soa_->getType(), soa_->getClass(),
-                                          soa_rdata));
+        this->soa_.reset(new RRset(this->zname_, this->qclass_, RRType::SOA(),
+                                   this->rrttl_));
+        this->soa_->addRdata(rdata::createRdata(this->soa_->getType(),
+                                                this->soa_->getClass(),
+                                                soa_rdata));
         // Add the new SOA
-        updater_->addRRset(*soa_);
+        this->updater_->addRRset(*this->soa_);
         expected.push_back(JournalEntry(WRITABLE_ZONE_ID, 1234 + i,
                                         DatabaseAccessor::DIFF_ADD,
                                         "example.org.", "SOA", "3600",
                                         soa_rdata));
     }
-    ASSERT_NO_THROW(updater_->commit());
+    ASSERT_NO_THROW(this->updater_->commit());
     // Check the journal contains everything.
-    current_accessor_->checkJournal(expected);
+    this->checkJournal(expected);
 }
 
 /*
@@ -2872,46 +3005,50 @@ TEST_F(MockDatabaseClientTest, journalMultiple) {
  *
  * Note that we implicitly test in different testcases (these for add and
  * delete) that if the journaling is false, it doesn't expect the order.
+ *
+ * In this test we don't check with the real databases as this case shouldn't
+ * contain backend-specific behavior.
  */
 TEST_F(MockDatabaseClientTest, journalBadSequence) {
     std::vector<JournalEntry> expected;
     {
         SCOPED_TRACE("Delete A before SOA");
-        updater_ = client_->getUpdater(zname_, false, true);
-        EXPECT_THROW(updater_->deleteRRset(*rrset_), isc::BadValue);
+        this->updater_ = this->client_->getUpdater(this->zname_, false, true);
+        EXPECT_THROW(this->updater_->deleteRRset(*this->rrset_),
+                     isc::BadValue);
         // Make sure the journal is empty now
-        current_accessor_->checkJournal(expected);
+        this->checkJournal(expected);
     }
 
     {
         SCOPED_TRACE("Add before delete");
-        updater_ = client_->getUpdater(zname_, false, true);
-        EXPECT_THROW(updater_->addRRset(*soa_), isc::BadValue);
+        this->updater_ = this->client_->getUpdater(this->zname_, false, true);
+        EXPECT_THROW(this->updater_->addRRset(*this->soa_), isc::BadValue);
         // Make sure the journal is empty now
-        current_accessor_->checkJournal(expected);
+        this->checkJournal(expected);
     }
 
     {
         SCOPED_TRACE("Add A before SOA");
-        updater_ = client_->getUpdater(zname_, false, true);
+        this->updater_ = this->client_->getUpdater(this->zname_, false, true);
         // So far OK
-        EXPECT_NO_THROW(updater_->deleteRRset(*soa_));
+        EXPECT_NO_THROW(this->updater_->deleteRRset(*this->soa_));
         // But we miss the add SOA here
-        EXPECT_THROW(updater_->addRRset(*rrset_), isc::BadValue);
+        EXPECT_THROW(this->updater_->addRRset(*this->rrset_), isc::BadValue);
         // Make sure the journal contains only the first one
         expected.push_back(JournalEntry(WRITABLE_ZONE_ID, 1234,
                                         DatabaseAccessor::DIFF_DELETE,
                                         "example.org.", "SOA", "3600",
                                         "ns1.example.org. admin.example.org. "
                                         "1234 3600 1800 2419200 7200"));
-        current_accessor_->checkJournal(expected);
+        this->checkJournal(expected);
     }
 
     {
         SCOPED_TRACE("Commit before add");
-        updater_ = client_->getUpdater(zname_, false, true);
+        this->updater_ = this->client_->getUpdater(this->zname_, false, true);
         // So far OK
-        EXPECT_NO_THROW(updater_->deleteRRset(*soa_));
+        EXPECT_NO_THROW(this->updater_->deleteRRset(*this->soa_));
         // Commit at the wrong time
         EXPECT_THROW(updater_->commit(), isc::BadValue);
         current_accessor_->checkJournal(expected);
@@ -2919,29 +3056,29 @@ TEST_F(MockDatabaseClientTest, journalBadSequence) {
 
     {
         SCOPED_TRACE("Delete two SOAs");
-        updater_ = client_->getUpdater(zname_, false, true);
+        this->updater_ = this->client_->getUpdater(this->zname_, false, true);
         // So far OK
-        EXPECT_NO_THROW(updater_->deleteRRset(*soa_));
+        EXPECT_NO_THROW(this->updater_->deleteRRset(*this->soa_));
         // Delete the SOA again
-        EXPECT_THROW(updater_->deleteRRset(*soa_), isc::BadValue);
-        current_accessor_->checkJournal(expected);
+        EXPECT_THROW(this->updater_->deleteRRset(*this->soa_), isc::BadValue);
+        this->checkJournal(expected);
     }
 
     {
         SCOPED_TRACE("Add two SOAs");
-        updater_ = client_->getUpdater(zname_, false, true);
+        this->updater_ = this->client_->getUpdater(this->zname_, false, true);
         // So far OK
-        EXPECT_NO_THROW(updater_->deleteRRset(*soa_));
+        EXPECT_NO_THROW(this->updater_->deleteRRset(*this->soa_));
         // Still OK
-        EXPECT_NO_THROW(updater_->addRRset(*soa_));
+        EXPECT_NO_THROW(this->updater_->addRRset(*this->soa_));
         // But this one is added again
-        EXPECT_THROW(updater_->addRRset(*soa_), isc::BadValue);
+        EXPECT_THROW(this->updater_->addRRset(*this->soa_), isc::BadValue);
         expected.push_back(JournalEntry(WRITABLE_ZONE_ID, 1234,
                                         DatabaseAccessor::DIFF_ADD,
                                         "example.org.", "SOA", "3600",
                                         "ns1.example.org. admin.example.org. "
                                         "1234 3600 1800 2419200 7200"));
-        current_accessor_->checkJournal(expected);
+        this->checkJournal(expected);
     }
 }
 
@@ -2949,8 +3086,9 @@ TEST_F(MockDatabaseClientTest, journalBadSequence) {
  * Test it rejects to store journals when we request it together with
  * erasing the whole zone.
  */
-TEST_F(MockDatabaseClientTest, journalOnErase) {
-    EXPECT_THROW(client_->getUpdater(zname_, true, true), isc::BadValue);
+TYPED_TEST(DatabaseClientTest, journalOnErase) {
+    EXPECT_THROW(this->client_->getUpdater(this->zname_, true, true),
+                 isc::BadValue);
 }
 
 /*
@@ -2974,4 +3112,149 @@ TEST_F(MockDatabaseClientTest, journalException) {
     EXPECT_THROW(updater_->deleteRRset(*soa_), DataSourceError);
 }
 
+//
+// Tests for the ZoneJournalReader
+//
+
+// Install a simple, commonly used diff sequence: making an update from one
+// SOA to another.  Return the end SOA RRset for the convenience of the caller.
+ConstRRsetPtr
+makeSimpleDiff(DataSourceClient& client, const Name& zname,
+               const RRClass& rrclass, ConstRRsetPtr begin_soa)
+{
+    ZoneUpdaterPtr updater = client.getUpdater(zname, false, true);
+    updater->deleteRRset(*begin_soa);
+    RRsetPtr soa_end(new RRset(zname, rrclass, RRType::SOA(), RRTTL(3600)));
+    soa_end->addRdata(rdata::createRdata(RRType::SOA(), rrclass,
+                                         "ns1.example.org. admin.example.org. "
+                                         "1235 3600 1800 2419200 7200"));
+    updater->addRRset(*soa_end);
+    updater->commit();
+
+    return (soa_end);
+}
+
+TYPED_TEST(DatabaseClientTest, journalReader) {
+    // Check the simple case made by makeSimpleDiff.
+    ConstRRsetPtr soa_end = makeSimpleDiff(*this->client_, this->zname_,
+                                           this->qclass_, this->soa_);
+    pair<ZoneJournalReader::Result, ZoneJournalReaderPtr> result =
+        this->client_->getJournalReader(this->zname_, 1234, 1235);
+    EXPECT_EQ(ZoneJournalReader::SUCCESS, result.first);
+    ZoneJournalReaderPtr jnl_reader = result.second;
+    ASSERT_TRUE(jnl_reader);
+    ConstRRsetPtr rrset = jnl_reader->getNextDiff();
+    ASSERT_TRUE(rrset);
+    isc::testutils::rrsetCheck(this->soa_, rrset);
+    rrset = jnl_reader->getNextDiff();
+    ASSERT_TRUE(rrset);
+    isc::testutils::rrsetCheck(soa_end, rrset);
+    rrset = jnl_reader->getNextDiff();
+    ASSERT_FALSE(rrset);
+
+    // Once it reaches the end of the sequence, further read attempts will
+    // result in an exception.
+    EXPECT_THROW(jnl_reader->getNextDiff(), isc::InvalidOperation);
+}
+
+TYPED_TEST(DatabaseClientTest, readLargeJournal) {
+    // Similar to journalMultiple, but check that at a higher level.
+
+    this->updater_ = this->client_->getUpdater(this->zname_, false, true);
+
+    vector<ConstRRsetPtr> expected;
+    for (size_t i = 0; i < 100; ++i) {
+        // Create the old SOA and remove it, and record it in the expected list
+        RRsetPtr rrset1(new RRset(this->zname_, this->qclass_, RRType::SOA(),
+                                  this->rrttl_));
+        string soa_rdata = "ns1.example.org. admin.example.org. " +
+            lexical_cast<std::string>(1234 + i) + " 3600 1800 2419200 7200";
+        rrset1->addRdata(rdata::createRdata(RRType::SOA(), this->qclass_,
+                                            soa_rdata));
+        this->updater_->deleteRRset(*rrset1);
+        expected.push_back(rrset1);
+
+        // Create a new SOA, add it, and record it.
+        RRsetPtr rrset2(new RRset(this->zname_, this->qclass_, RRType::SOA(),
+                                  this->rrttl_));
+        soa_rdata = "ns1.example.org. admin.example.org. " +
+            lexical_cast<std::string>(1234 + i + 1) +
+            " 3600 1800 2419200 7200";
+        rrset2->addRdata(rdata::createRdata(RRType::SOA(), this->qclass_,
+                                            soa_rdata));
+        this->updater_->addRRset(*rrset2);
+        expected.push_back(rrset2);
+    }
+    this->updater_->commit();
+
+    ZoneJournalReaderPtr jnl_reader(this->client_->getJournalReader(
+                                        this->zname_, 1234, 1334).second);
+    ConstRRsetPtr actual;
+    size_t i = 0;
+    while ((actual = jnl_reader->getNextDiff()) != NULL) {
+        isc::testutils::rrsetCheck(expected.at(i++), actual);
+    }
+    EXPECT_EQ(expected.size(), i); // we should have eaten all expected data
+}
+
+TYPED_TEST(DatabaseClientTest, readJournalForNoRange) {
+    makeSimpleDiff(*this->client_, this->zname_, this->qclass_, this->soa_);
+
+    // The specified range does not exist in the diff storage.  The factory
+    // method should result in NO_SUCH_VERSION
+    pair<ZoneJournalReader::Result, ZoneJournalReaderPtr> result =
+        this->client_->getJournalReader(this->zname_, 1200, 1235);
+    EXPECT_EQ(ZoneJournalReader::NO_SUCH_VERSION, result.first);
+    EXPECT_FALSE(result.second);
+}
+
+TYPED_TEST(DatabaseClientTest, journalReaderForNXZone) {
+    pair<ZoneJournalReader::Result, ZoneJournalReaderPtr> result =
+        this->client_->getJournalReader(Name("nosuchzone"), 0, 1);
+    EXPECT_EQ(ZoneJournalReader::NO_SUCH_ZONE, result.first);
+    EXPECT_FALSE(result.second);
+}
+
+// A helper function for journalWithBadData.  It installs a simple diff
+// from one serial (of 'begin') to another ('begin' + 1), tweaking a specified
+// field of data with some invalid value.
+void
+installBadDiff(MockAccessor& accessor, uint32_t begin,
+               DatabaseAccessor::DiffRecordParams modify_param,
+               const char* const data)
+{
+    string data1[] = {"example.org.", "SOA", "3600", "ns. root. 1 1 1 1 1"};
+    string data2[] = {"example.org.", "SOA", "3600", "ns. root. 2 1 1 1 1"};
+    data1[modify_param] = data;
+    accessor.addRecordDiff(READONLY_ZONE_ID, begin,
+                           DatabaseAccessor::DIFF_DELETE, data1);
+    accessor.addRecordDiff(READONLY_ZONE_ID, begin + 1,
+                           DatabaseAccessor::DIFF_ADD, data2);
+}
+
+TEST_F(MockDatabaseClientTest, journalWithBadData) {
+    MockAccessor& mock_accessor =
+        dynamic_cast<MockAccessor&>(*current_accessor_);
+
+    // One of the fields from the data source is broken as an RR parameter.
+    // The journal reader should still be constructed, but getNextDiff()
+    // should result in exception.
+    installBadDiff(mock_accessor, 1, DatabaseAccessor::DIFF_NAME,
+                   "example..org");
+    installBadDiff(mock_accessor, 3, DatabaseAccessor::DIFF_TYPE,
+                   "bad-rrtype");
+    installBadDiff(mock_accessor, 5, DatabaseAccessor::DIFF_TTL,
+                   "bad-ttl");
+    installBadDiff(mock_accessor, 7, DatabaseAccessor::DIFF_RDATA,
+                   "bad rdata");
+    EXPECT_THROW(this->client_->getJournalReader(this->zname_, 1, 2).
+                 second->getNextDiff(), DataSourceError);
+    EXPECT_THROW(this->client_->getJournalReader(this->zname_, 3, 4).
+                 second->getNextDiff(), DataSourceError);
+    EXPECT_THROW(this->client_->getJournalReader(this->zname_, 5, 6).
+                 second->getNextDiff(), DataSourceError);
+    EXPECT_THROW(this->client_->getJournalReader(this->zname_, 7, 8).
+                 second->getNextDiff(), DataSourceError);
+}
+
 }
diff --git a/src/lib/datasrc/tests/testdata/Makefile.am b/src/lib/datasrc/tests/testdata/Makefile.am
index 64ae955..6a35fe3 100644
--- a/src/lib/datasrc/tests/testdata/Makefile.am
+++ b/src/lib/datasrc/tests/testdata/Makefile.am
@@ -1,6 +1 @@
 CLEANFILES = *.copied
-BUILT_SOURCES = rwtest.sqlite3.copied
-
-# We use install-sh with the -m option to make sure it's writable
-rwtest.sqlite3.copied: $(srcdir)/rwtest.sqlite3
-	$(top_srcdir)/install-sh -m 644 $(srcdir)/rwtest.sqlite3 $@
diff --git a/src/lib/datasrc/zone.h b/src/lib/datasrc/zone.h
index f824636..9fcd289 100644
--- a/src/lib/datasrc/zone.h
+++ b/src/lib/datasrc/zone.h
@@ -560,6 +560,98 @@ public:
 /// \brief A pointer-like type pointing to a \c ZoneUpdater object.
 typedef boost::shared_ptr<ZoneUpdater> ZoneUpdaterPtr;
 
+/// The base class for retrieving differences between two versions of a zone.
+///
+/// On construction, each derived class object will internally set up
+/// retrieving sequences of differences between two specific version of
+/// a specific zone managed in a particular data source.  So the constructor
+/// of a derived class would normally take parameters to identify the zone
+/// and the two versions for which the differences should be retrieved.
+/// See \c DataSourceClient::getJournalReader for more concrete details
+/// used in this API.
+///
+/// Once constructed, an object of this class will act like an iterator
+/// over the sequences.  Every time the \c getNextDiff() method is called
+/// it returns one element of the differences in the form of an \c RRset
+/// until it reaches the end of the entire sequences.
+class ZoneJournalReader {
+public:
+    /// Result codes used by a factory method for \c ZoneJournalReader
+    enum Result {
+        SUCCESS, ///< A \c ZoneJournalReader object successfully created
+        NO_SUCH_ZONE, ///< Specified zone does not exist in the data source
+        NO_SUCH_VERSION ///< Specified versions do not exist in the diff storage
+    };
+
+protected:
+    /// The default constructor.
+    ///
+    /// This is intentionally defined as protected to ensure that this base
+    /// class is never instantiated directly.
+    ZoneJournalReader() {}
+
+public:
+    /// The destructor
+    virtual ~ZoneJournalReader() {}
+
+    /// Return the next difference RR of difference sequences.
+    ///
+    /// In this API, the difference between two versions of a zone is
+    /// conceptually represented as IXFR-style difference sequences:
+    /// Each difference sequence is a sequence of RRs: an older version of
+    /// SOA (to be deleted), zero or more other deleted RRs, the
+    /// post-transaction SOA (to be added), and zero or more other
+    /// added RRs.  (Note, however, that the underlying data source
+    /// implementation may or may not represent the difference in
+    /// straightforward realization of this concept.  The mapping between
+    /// the conceptual difference and the actual implementation is hidden
+    /// in each derived class).
+    ///
+    /// This method provides an application with a higher level interface
+    /// to retrieve the difference along with the conceptual model: the
+    /// \c ZoneJournalReader object iterates over the entire sequences
+    /// from the beginning SOA (which is to be deleted) to the last of the
+    /// RRs added along with the ending SOA, and each call to this method
+    /// returns one RR in the form of an \c RRset that contains exactly one
+    /// RDATA in the order of the sequences.
+    ///
+    /// Note that the ordering of the sequences specifies the semantics of
+    /// each difference: add or delete.  For example, the first RR is to
+    /// be deleted, and the last RR is to be added.  So the return value
+    /// of this method does not explicitly indicate whether the RR is to be
+    /// added or deleted.
+    ///
+    /// This method ensures the returned \c RRset represents an RR, that is,
+    /// it contains exactly one RDATA.  However, it does not necessarily
+    /// ensure that the resulting sequences are in IXFR-style form.
+    /// For example, the first RR is supposed to be an SOA, and that should
+    /// normally be the case, but this interface does not require the
+    /// derived class implementation to ensure it.  Normally the
+    /// differences are expected to be stored using this API (via a
+    /// \c ZoneUpdater object), and as long as that is the case and the
+    /// underlying implementation follows the requirements of the API, the
+    /// result of this method should be a valid IXFR-style sequence.
+    /// So this API does not mandate that almost redundant check as part of
+    /// the interface.  If the application needs to be 100% sure, it
+    /// must check the resulting sequence itself.
+    ///
+    /// Once the object reaches the end of the sequences, this method returns
+    /// \c Null.  Any subsequent call will result in an exception of
+    /// class \c InvalidOperation.
+    ///
+    /// \exception InvalidOperation The method is called beyond the end of
+    /// the difference sequences.
+    /// \exception DataSourceError Underlying data is broken and the RR
+    /// cannot be created or other low level data source error.
+    ///
+    /// \return An \c RRset that contains one RDATA corresponding to the
+    /// next difference in the sequences.
+    virtual isc::dns::ConstRRsetPtr getNextDiff() = 0;
+};
+
+/// \brief A pointer-like type pointing to a \c ZoneJournalReader object.
+typedef boost::shared_ptr<ZoneJournalReader> ZoneJournalReaderPtr;
+
 } // end of datasrc
 } // end of isc
 
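
Because getNextDiff() returns plain RRsets and leaves the add/delete semantics
to the ordering (each SOA starts a new deletion or addition run, as described
above), a consumer has to do that bookkeeping itself.  Below is a minimal
sketch through the Python binding added in this commit; the get_next_diff()
method name follows the binding's usual naming convention and is an
assumption, not something quoted from this diff:

    import isc.dns

    def classify_diffs(reader):
        # Split a ZoneJournalReader's output into deletions and additions.
        # The first SOA opens a deletion run, the next SOA an addition run,
        # and so on for every difference sequence.
        deletions, additions = [], []
        adding = True                    # flipped to False by the first SOA
        rrset = reader.get_next_diff()   # returns None at the end
        while rrset is not None:
            if rrset.get_type() == isc.dns.RRType.SOA():
                adding = not adding
            (additions if adding else deletions).append(rrset)
            rrset = reader.get_next_diff()
        return deletions, additions
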
diff --git a/src/lib/exceptions/exceptions.h b/src/lib/exceptions/exceptions.h
index 433bb7d..b68f3c4 100644
--- a/src/lib/exceptions/exceptions.h
+++ b/src/lib/exceptions/exceptions.h
@@ -126,6 +126,17 @@ public:
         isc::Exception(file, line, what) {}
 };
 
+/// \brief A generic exception that is thrown if a function is called
+/// in a prohibited way.
+///
+/// For example, this can happen if a class method is called when the object's
+/// state does not allow that particular method.
+class InvalidOperation : public Exception {
+public:
+    InvalidOperation(const char* file, size_t line, const char* what) :
+        isc::Exception(file, line, what) {}
+};
+
 ///
 /// \brief A generic exception that is thrown when an unexpected
 /// error condition occurs.
diff --git a/src/lib/python/isc/bind10/component.py b/src/lib/python/isc/bind10/component.py
index 248bd3b..e4fb3ec 100644
--- a/src/lib/python/isc/bind10/component.py
+++ b/src/lib/python/isc/bind10/component.py
@@ -39,6 +39,7 @@ START_CMD = 'start'
 STOP_CMD = 'stop'
 
 STARTED_OK_TIME = 10
+COMPONENT_RESTART_DELAY = 10
 
 STATE_DEAD = 'dead'
 STATE_STOPPED = 'stopped'
@@ -99,11 +100,18 @@ class BaseComponent:
             but it is vital part of the service (like auth server). If
             it fails to start or crashes in less than 10s after the first
             startup, the system is brought down. If it crashes later on,
-            it is restarted.
+            it is restarted (see below).
           * 'dispensable' means the component should be running, but if it
             doesn't start or crashes for some reason, the system simply tries
             to restart it and keeps running.
 
+        For components that are restarted, the restarts are not always
+        immediate; if the component has run for more than
+            COMPONENT_RESTART_DELAY (10) seconds, it is restarted right
+        away. If the component has not run that long, the system waits
+            until that time has passed (since the last start) before the
+        component is restarted.
+
         Note that the __init__ method of child class should have these
         parameters:
 
@@ -134,6 +142,7 @@ class BaseComponent:
         self.__state = STATE_STOPPED
         self._kind = kind
         self._boss = boss
+        self._original_start_time = None
 
     def start(self):
         """
@@ -149,6 +158,9 @@ class BaseComponent:
         logger.info(BIND10_COMPONENT_START, self.name())
         self.__state = STATE_RUNNING
         self.__start_time = time.time()
+        if self._original_start_time is None:
+            self._original_start_time = self.__start_time
+        self._restart_at = None
         try:
             self._start_internal()
         except Exception as e:
@@ -188,6 +200,11 @@ class BaseComponent:
         The exit code is used for logging. It might be None.
 
         It calls _failed_internal internally.
+
+        Returns True if the process was immediately restarted, returns
+                False if the process was not restarted, either because
+                it is considered a core or needed component, or because
+                the component is to be restarted later.
         """
         logger.error(BIND10_COMPONENT_FAILED, self.name(), self.pid(),
                      exit_code if exit_code is not None else "unknown")
@@ -199,14 +216,47 @@ class BaseComponent:
         # (including it stopped really soon)
         if self._kind == 'core' or \
             (self._kind == 'needed' and time.time() - STARTED_OK_TIME <
-             self.__start_time):
+             self._original_start_time):
             self.__state = STATE_DEAD
             logger.fatal(BIND10_COMPONENT_UNSATISFIED, self.name())
             self._boss.component_shutdown(1)
+            return False
         # This means we want to restart
         else:
-            logger.warn(BIND10_COMPONENT_RESTART, self.name())
+            # if the component was only running for a short time, don't
+            # restart right away, but set the time at which it should be
+            # restarted, and report whether it was restarted immediately
+            self.set_restart_time()
+            return self.restart()
+
+    def set_restart_time(self):
+        """Calculates and sets the time this component should be restarted.
+           Currently, it uses a very basic algorithm: start time +
+           COMPONENT_RESTART_DELAY (10 seconds). This algorithm may be
+           improved upon in the future.
+        """
+        self._restart_at = self.__start_time + COMPONENT_RESTART_DELAY
+
+    def get_restart_time(self):
+        """Returns the time at which this component should be restarted."""
+        return self._restart_at
+
+    def restart(self, now = None):
+        """Restarts the component if it has a restart_time and if the value
+           of the restart_time is smaller than 'now'.
+
+           If the parameter 'now' is given, its value will be used instead
+           of calling time.time().
+
+           Returns True if the component is restarted, False if not."""
+        if now is None:
+            now = time.time()
+        if self.get_restart_time() is not None and \
+           self.get_restart_time() < now:
             self.start()
+            return True
+        else:
+            return False
 
     def running(self):
         """
diff --git a/src/lib/python/isc/bind10/tests/component_test.py b/src/lib/python/isc/bind10/tests/component_test.py
index 15fa470..6bf9e58 100644
--- a/src/lib/python/isc/bind10/tests/component_test.py
+++ b/src/lib/python/isc/bind10/tests/component_test.py
@@ -221,11 +221,6 @@ class ComponentTests(BossUtils, unittest.TestCase):
         """
         Check the component restarted successfully.
 
-        Currently, it is implemented as starting it again right away. This will
-        change, it will register itself into the restart schedule in boss. But
-        as the integration with boss is not clear yet, we don't know how
-        exactly that will happen.
-
         Reset the self.__start_called to False before calling the function when
         the component should fail.
         """
@@ -237,6 +232,16 @@ class ComponentTests(BossUtils, unittest.TestCase):
         # Check it can't be started again
         self.assertRaises(ValueError, component.start)
 
+    def __check_not_restarted(self, component):
+        """
+        Check the component has not (yet) restarted successfully.
+        """
+        self.assertFalse(self._shutdown)
+        self.assertTrue(self.__start_called)
+        self.assertFalse(self.__stop_called)
+        self.assertTrue(self.__failed_called)
+        self.assertFalse(component.running())
+
     def __do_start_stop(self, kind):
         """
         This is a body of a test. It creates a component of given kind,
@@ -296,7 +301,9 @@ class ComponentTests(BossUtils, unittest.TestCase):
         component.start()
         self.__check_started(component)
         # Pretend the component died
-        component.failed(1)
+        restarted = component.failed(1)
+        # Since it is a core component, it should not be restarted
+        self.assertFalse(restarted)
         # It should bring down the whole server
         self.__check_dead(component)
 
@@ -312,7 +319,9 @@ class ComponentTests(BossUtils, unittest.TestCase):
         self.__check_started(component)
         self._timeskip()
         # Pretend the component died some time later
-        component.failed(1)
+        restarted = component.failed(1)
+        # Should not be restarted
+        self.assertFalse(restarted)
         # Check the component is still dead
         self.__check_dead(component)
 
@@ -328,7 +337,9 @@ class ComponentTests(BossUtils, unittest.TestCase):
         component.start()
         self.__check_started(component)
         # Make it fail right away.
-        component.failed(1)
+        restarted = component.failed(1)
+        # Should not have restarted
+        self.assertFalse(restarted)
         self.__check_dead(component)
 
     def test_start_fail_needed_later(self):
@@ -344,37 +355,65 @@ class ComponentTests(BossUtils, unittest.TestCase):
         # Make it fail later on
         self.__start_called = False
         self._timeskip()
-        component.failed(1)
+        restarted = component.failed(1)
+        # Should have restarted
+        self.assertTrue(restarted)
         self.__check_restarted(component)
 
     def test_start_fail_dispensable(self):
         """
-        Start and then fail a dispensable component. Should just get restarted.
+        Start and then fail a dispensable component. Should not get restarted.
         """
         # Just ordinary startup
-        component = self.__create_component('needed')
+        component = self.__create_component('dispensable')
         self.__check_startup(component)
         component.start()
         self.__check_started(component)
         # Make it fail right away
-        self.__start_called = False
-        component.failed(1)
-        self.__check_restarted(component)
+        restarted = component.failed(1)
+        # Should signal that it did not restart
+        self.assertFalse(restarted)
+        self.__check_not_restarted(component)
 
-    def test_start_fail_dispensable(self):
+    def test_start_fail_dispensable_later(self):
         """
         Start and then later on fail a dispensable component. Should just get
         restarted.
         """
         # Just ordinary startup
-        component = self.__create_component('needed')
+        component = self.__create_component('dispensable')
         self.__check_startup(component)
         component.start()
         self.__check_started(component)
         # Make it fail later on
-        self.__start_called = False
         self._timeskip()
-        component.failed(1)
+        restarted = component.failed(1)
+        # should signal that it restarted
+        self.assertTrue(restarted)
+        # and check if it really did
+        self.__check_restarted(component)
+
+    def test_start_fail_dispensable_restart_later(self):
+        """
+        Start and then fail a dispensable component, wait a bit and try to
+        restart. Should get restarted after the wait.
+        """
+        # Just ordinary startup
+        component = self.__create_component('dispensable')
+        self.__check_startup(component)
+        component.start()
+        self.__check_started(component)
+        # Make it fail immediately
+        restarted = component.failed(1)
+        # should signal that it did not restart
+        self.assertFalse(restarted)
+        self.__check_not_restarted(component)
+        self._timeskip()
+        # try to restart again
+        restarted = component.restart()
+        # should signal that it restarted
+        self.assertTrue(restarted)
+        # and check if it really did
         self.__check_restarted(component)
 
     def test_fail_core(self):
@@ -402,14 +441,56 @@ class ComponentTests(BossUtils, unittest.TestCase):
     def test_fail_dispensable(self):
         """
         Failure to start a dispensable component. The exception should get
-        through, but it should be restarted.
+        through, but it should be restarted after a time skip.
         """
         component = self.__create_component('dispensable')
         self.__check_startup(component)
         component._start_internal = self.__fail_to_start
         self.assertRaises(TestError, component.start)
+        # tell it to see if it must restart
+        restarted = component.restart()
+        # should not have restarted yet
+        self.assertFalse(restarted)
+        self.__check_not_restarted(component)
+        self._timeskip()
+        # tell it to see if it must restart and do so, using the simulated time
+        restarted = component.restart()
+        # should have restarted now
+        self.assertTrue(restarted)
+        self.__check_restarted(component)
+
+    def test_component_start_time(self):
+        """
+        Check that the original start time is set at the first start, and
+        remains the same after a restart, while __start_time does change
+        """
+        # Just ordinary startup
+        component = self.__create_component('dispensable')
+        self.__check_startup(component)
+        self.assertIsNone(component._original_start_time)
+        component.start()
+        self.__check_started(component)
+
+        self.assertIsNotNone(component._original_start_time)
+        self.assertIsNotNone(component._BaseComponent__start_time)
+        original_start_time = component._original_start_time
+        start_time = component._BaseComponent__start_time
+        # Not restarted yet, so they should be the same
+        self.assertEqual(original_start_time, start_time)
+
+        self._timeskip()
+        # Make it fail
+        restarted = component.failed(1)
+        # should signal that it restarted
+        self.assertTrue(restarted)
+        # and check if it really did
         self.__check_restarted(component)
 
+        # original start time should not have changed
+        self.assertEqual(original_start_time, component._original_start_time)
+        # but actual start time should
+        self.assertNotEqual(start_time, component._BaseComponent__start_time)
+
     def test_bad_kind(self):
         """
         Test the component rejects nonsensical kinds. This includes bad
diff --git a/src/lib/python/isc/datasrc/Makefile.am b/src/lib/python/isc/datasrc/Makefile.am
index a5b4ca3..fb6d151 100644
--- a/src/lib/python/isc/datasrc/Makefile.am
+++ b/src/lib/python/isc/datasrc/Makefile.am
@@ -17,6 +17,7 @@ datasrc_la_SOURCES += client_python.cc client_python.h
 datasrc_la_SOURCES += iterator_python.cc iterator_python.h
 datasrc_la_SOURCES += finder_python.cc finder_python.h
 datasrc_la_SOURCES += updater_python.cc updater_python.h
+datasrc_la_SOURCES += journal_reader_python.cc journal_reader_python.h
 
 datasrc_la_CPPFLAGS = $(AM_CPPFLAGS) $(PYTHON_INCLUDES)
 datasrc_la_CXXFLAGS = $(AM_CXXFLAGS) $(PYTHON_CXXFLAGS)
@@ -30,6 +31,7 @@ EXTRA_DIST = client_inc.cc
 EXTRA_DIST += finder_inc.cc
 EXTRA_DIST += iterator_inc.cc
 EXTRA_DIST += updater_inc.cc
+EXTRA_DIST += journal_reader_inc.cc
 
 CLEANDIRS = __pycache__
 
diff --git a/src/lib/python/isc/datasrc/client_inc.cc b/src/lib/python/isc/datasrc/client_inc.cc
index f4fb01b..e0c0f06 100644
--- a/src/lib/python/isc/datasrc/client_inc.cc
+++ b/src/lib/python/isc/datasrc/client_inc.cc
@@ -190,4 +190,60 @@ Parameters:\n\
   journaling The zone updater should store a journal of the changes.\n\
 \n\
 ";
+
+// Modifications from C++ doc:
+//   pointer -> (removed)
+//   Null -> None
+//   exception types
+const char* const DataSourceClient_getJournalReader_doc = "\
+get_journal_reader(zone, begin_serial, end_serial) ->\n\
+   (int, ZoneJournalReader)\n\
+\n\
+Return a journal reader to retrieve differences of a zone.\n\
+\n\
+A derived version of this method creates a concrete ZoneJournalReader\n\
+object specific to the underlying data source for the specified name\n\
+of zone and differences between the versions specified by the\n\
+beginning and ending serials of the corresponding SOA RRs. The RR\n\
+class of the zone is the one that the client is expected to handle\n\
+(see the detailed description of this class).\n\
+\n\
+Note that the SOA serials are compared by the semantics of the serial\n\
+number arithmetic. So, for example, begin_serial can be larger than\n\
+end_serial as bare unsigned integers. The underlying data source\n\
+implementation is assumed to keep track of sufficient history to\n\
+identify (if it exists) the corresponding difference between the specified\n\
+versions.\n\
+\n\
+This method returns the result as a pair of a result code and a\n\
+ZoneJournalReader object. On success, the result code is\n\
+SUCCESS and the object must not be None; otherwise the result code is\n\
+something other than SUCCESS and the object must be None.\n\
+\n\
+If the specified zone is not found in the data source, the result code\n\
+is NO_SUCH_ZONE. Otherwise, if specified range of difference for the\n\
+zone is not found in the data source, the result code is\n\
+NO_SUCH_VERSION.\n\
+\n\
+Handling differences is an optional feature of a data source. If the\n\
+underlying data source does not support difference handling, this\n\
+method for that type of data source can throw an exception of class\n\
+isc.datasrc.NotImplemented.\n\
+\n\
+Exceptions:\n\
+  isc.datasrc.NotImplemented The data source does not support differences.\n\
+  isc.datasrc.Error Other operational errors at the data source level.\n\
+  SystemError An unexpected error in the backend C++ code.  Either a rare\n\
+              system error such as short memory or an implementation bug.\n\
+\n\
+Parameters:\n\
+  zone       The name of the zone for which the difference should be\n\
+             retrieved.\n\
+  begin_serial The SOA serial of the beginning version of the\n\
+             differences.\n\
+  end_serial The SOA serial of the ending version of the differences.\n\
+\n\
+Return Value(s): A pair of result code and a ZoneJournalReader object\n\
+(which can be None)\n\
+";
 } // unnamed namespace
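
Putting the documented pieces together from the Python side, a minimal
end-to-end sketch; the database path and serial numbers are placeholders, and
get_next_diff() is assumed from the binding's naming convention rather than
quoted from this diff:

    import isc.datasrc
    import isc.dns

    client = isc.datasrc.DataSourceClient(
        "sqlite3", '{"database_file": "/tmp/example.org.sqlite3"}')
    code, reader = client.get_journal_reader(isc.dns.Name("example.org"),
                                             1234, 1235)
    if code == isc.datasrc.ZoneJournalReader.SUCCESS:
        rrset = reader.get_next_diff()
        while rrset is not None:          # None marks the end of the diffs
            print(rrset.to_text())
            rrset = reader.get_next_diff()
    elif code == isc.datasrc.ZoneJournalReader.NO_SUCH_VERSION:
        pass  # requested range not in the journal; fall back to a full transfer
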
diff --git a/src/lib/python/isc/datasrc/client_python.cc b/src/lib/python/isc/datasrc/client_python.cc
index 2740355..bdf84a3 100644
--- a/src/lib/python/isc/datasrc/client_python.cc
+++ b/src/lib/python/isc/datasrc/client_python.cc
@@ -38,6 +38,7 @@
 #include "finder_python.h"
 #include "iterator_python.h"
 #include "updater_python.h"
+#include "journal_reader_python.h"
 #include "client_inc.cc"
 
 using namespace std;
@@ -173,6 +174,43 @@ DataSourceClient_getUpdater(PyObject* po_self, PyObject* args) {
     }
 }
 
+PyObject*
+DataSourceClient_getJournalReader(PyObject* po_self, PyObject* args) {
+    s_DataSourceClient* const self = static_cast<s_DataSourceClient*>(po_self);
+    PyObject *name_obj;
+    unsigned long begin_obj, end_obj;
+
+    if (PyArg_ParseTuple(args, "O!kk", &name_type, &name_obj,
+                         &begin_obj, &end_obj)) {
+        try {
+            pair<ZoneJournalReader::Result, ZoneJournalReaderPtr> result =
+                self->cppobj->getInstance().getJournalReader(
+                    PyName_ToName(name_obj), static_cast<uint32_t>(begin_obj),
+                    static_cast<uint32_t>(end_obj));
+            PyObject* po_reader;
+            if (result.first == ZoneJournalReader::SUCCESS) {
+                po_reader = createZoneJournalReaderObject(result.second,
+                                                          po_self);
+            } else {
+                po_reader = Py_None;
+                Py_INCREF(po_reader); // this will soon be released
+            }
+            PyObjectContainer container(po_reader);
+            return (Py_BuildValue("(iO)", result.first, container.get()));
+        } catch (const isc::NotImplemented& ex) {
+            PyErr_SetString(getDataSourceException("NotImplemented"),
+                            ex.what());
+        } catch (const DataSourceError& ex) {
+            PyErr_SetString(getDataSourceException("Error"), ex.what());
+        } catch (const std::exception& ex) {
+            PyErr_SetString(PyExc_SystemError, ex.what());
+        } catch (...) {
+            PyErr_SetString(PyExc_SystemError, "Unexpected exception");
+        }
+    }
+    return (NULL);
+}
+
 // This list contains the actual set of functions we have in
 // python. Each entry has
 // 1. Python method name
@@ -180,18 +218,21 @@ DataSourceClient_getUpdater(PyObject* po_self, PyObject* args) {
 // 3. Argument type
 // 4. Documentation
 PyMethodDef DataSourceClient_methods[] = {
-    { "find_zone", reinterpret_cast<PyCFunction>(DataSourceClient_findZone),
-      METH_VARARGS, DataSourceClient_findZone_doc },
+    { "find_zone", DataSourceClient_findZone, METH_VARARGS,
+      DataSourceClient_findZone_doc },
     { "get_iterator",
-      reinterpret_cast<PyCFunction>(DataSourceClient_getIterator), METH_VARARGS,
+      DataSourceClient_getIterator, METH_VARARGS,
       DataSourceClient_getIterator_doc },
-    { "get_updater", reinterpret_cast<PyCFunction>(DataSourceClient_getUpdater),
+    { "get_updater", DataSourceClient_getUpdater,
       METH_VARARGS, DataSourceClient_getUpdater_doc },
+    { "get_journal_reader", DataSourceClient_getJournalReader,
+      METH_VARARGS, DataSourceClient_getJournalReader_doc },
     { NULL, NULL, 0, NULL }
 };
 
 int
-DataSourceClient_init(s_DataSourceClient* self, PyObject* args) {
+DataSourceClient_init(PyObject* po_self, PyObject* args, PyObject*) {
+    s_DataSourceClient* self = static_cast<s_DataSourceClient*>(po_self);
     char* ds_type_str;
     char* ds_config_str;
     try {
@@ -236,7 +277,8 @@ DataSourceClient_init(s_DataSourceClient* self, PyObject* args) {
 }
 
 void
-DataSourceClient_destroy(s_DataSourceClient* const self) {
+DataSourceClient_destroy(PyObject* po_self) {
+    s_DataSourceClient* const self = static_cast<s_DataSourceClient*>(po_self);
     delete self->cppobj;
     self->cppobj = NULL;
     Py_TYPE(self)->tp_free(self);
@@ -255,7 +297,7 @@ PyTypeObject datasourceclient_type = {
     "datasrc.DataSourceClient",
     sizeof(s_DataSourceClient),         // tp_basicsize
     0,                                  // tp_itemsize
-    reinterpret_cast<destructor>(DataSourceClient_destroy),// tp_dealloc
+    DataSourceClient_destroy,           // tp_dealloc
     NULL,                               // tp_print
     NULL,                               // tp_getattr
     NULL,                               // tp_setattr
@@ -286,7 +328,7 @@ PyTypeObject datasourceclient_type = {
     NULL,                               // tp_descr_get
     NULL,                               // tp_descr_set
     0,                                  // tp_dictoffset
-    reinterpret_cast<initproc>(DataSourceClient_init),// tp_init
+    DataSourceClient_init,              // tp_init
     NULL,                               // tp_alloc
     PyType_GenericNew,                  // tp_new
     NULL,                               // tp_free
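
A side note on the wrapper above: the two serial arguments are parsed with the "kk" format and cast to uint32_t, so the full 32-bit serial range is accepted from Python, and the C++ exceptions are mapped to Python exceptions as shown in the catch blocks. A hedged sketch from the caller's side (the client object is assumed to be a DataSourceClient as in the earlier sketch):

    import isc.datasrc
    from isc.dns import Name

    try:
        # A serial of 4294967295 (2^32 - 1) is passed through without overflow
        result, reader = client.get_journal_reader(Name("example.com"),
                                                   4294967295, 1)
    except isc.datasrc.NotImplemented:
        reader = None        # this data source has no journaling support at all
    except isc.datasrc.Error as ex:
        print("data source error:", ex)
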
diff --git a/src/lib/python/isc/datasrc/datasrc.cc b/src/lib/python/isc/datasrc/datasrc.cc
index 6ab29d8..1573b81 100644
--- a/src/lib/python/isc/datasrc/datasrc.cc
+++ b/src/lib/python/isc/datasrc/datasrc.cc
@@ -27,6 +27,7 @@
 #include "finder_python.h"
 #include "iterator_python.h"
 #include "updater_python.h"
+#include "journal_reader_python.h"
 
 #include <util/python/pycppwrapper_util.h>
 #include <dns/python/pydnspp_common.h>
@@ -192,6 +193,41 @@ initModulePart_ZoneUpdater(PyObject* mod) {
     return (true);
 }
 
+bool
+initModulePart_ZoneJournalReader(PyObject* mod) {
+    if (PyType_Ready(&journal_reader_type) < 0) {
+        return (false);
+    }
+    void* p = &journal_reader_type;
+    if (PyModule_AddObject(mod, "ZoneJournalReader",
+                           static_cast<PyObject*>(p)) < 0) {
+        return (false);
+    }
+    Py_INCREF(&journal_reader_type);
+
+    try {
+        installClassVariable(journal_reader_type, "SUCCESS",
+                             Py_BuildValue("I", ZoneJournalReader::SUCCESS));
+        installClassVariable(journal_reader_type, "NO_SUCH_ZONE",
+                             Py_BuildValue("I",
+                                           ZoneJournalReader::NO_SUCH_ZONE));
+        installClassVariable(journal_reader_type, "NO_SUCH_VERSION",
+                             Py_BuildValue("I",
+                                           ZoneJournalReader::NO_SUCH_VERSION));
+    } catch (const std::exception& ex) {
+        const std::string ex_what =
+            "Unexpected failure in ZoneJournalReader initialization: " +
+            std::string(ex.what());
+        PyErr_SetString(po_IscException, ex_what.c_str());
+        return (false);
+    } catch (...) {
+        PyErr_SetString(PyExc_SystemError,
+            "Unexpected failure in ZoneJournalReader initialization");
+        return (false);
+    }
+
+    return (true);
+}
 
 PyObject* po_DataSourceError;
 PyObject* po_NotImplemented;
@@ -239,6 +275,11 @@ PyInit_datasrc(void) {
         return (NULL);
     }
 
+    if (!initModulePart_ZoneJournalReader(mod)) {
+        Py_DECREF(mod);
+        return (NULL);
+    }
+
     try {
         po_DataSourceError = PyErr_NewException("isc.datasrc.Error", NULL,
                                                 NULL);
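
The class variables installed here let Python code compare the result code symbolically rather than against bare integers. A small illustrative sketch (the client and zone objects are assumed to exist already):

    from isc.datasrc import ZoneJournalReader

    # 'client' and 'zone' are assumed: a DataSourceClient and an isc.dns.Name
    result, reader = client.get_journal_reader(zone, 2011112201, 2011112202)
    if result == ZoneJournalReader.NO_SUCH_ZONE:
        print("the zone is not in this data source")
    elif result == ZoneJournalReader.NO_SUCH_VERSION:
        print("the journal does not cover the requested serial range")
    else:                     # ZoneJournalReader.SUCCESS
        print("reader is ready:", reader)
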
diff --git a/src/lib/python/isc/datasrc/journal_reader_inc.cc b/src/lib/python/isc/datasrc/journal_reader_inc.cc
new file mode 100644
index 0000000..35ba70e
--- /dev/null
+++ b/src/lib/python/isc/datasrc/journal_reader_inc.cc
@@ -0,0 +1,80 @@
+namespace {
+const char* const ZoneJournalReader_doc = "\
+The base class for retrieving differences between two versions of a\n\
+zone.\n\
+\n\
+On construction, each derived class object will internally set up\n\
+retrieving sequences of differences between two specific versions of a\n\
+specific zone managed in a particular data source. So the constructor\n\
+of a derived class would normally take parameters to identify the zone\n\
+and the two versions for which the differences should be retrieved.\n\
+See DataSourceClient.get_journal_reader for more concrete details used\n\
+in this API.\n\
+\n\
+Once constructed, an object of this class will act like an iterator\n\
+over the sequences. Every time the get_next_diff() method is called it\n\
+returns one element of the differences in the form of an RRset until\n\
+it reaches the end of the sequences.\n\
+\n\
+";
+
+// Modifications from C++ doc:
+//   ConstRRsetPtr -> RRset
+//   Null -> None
+//   InvalidOperation -> ValueError
+const char* const ZoneJournalReader_getNextDiff_doc = "\
+get_next_diff() -> isc.dns.RRset\n\
+\n\
+Return the next difference RR of difference sequences.\n\
+\n\
+In this API, the difference between two versions of a zone is\n\
+conceptually represented as IXFR-style difference sequences: Each\n\
+difference sequence is a sequence of RRs: an older version of SOA (to\n\
+be deleted), zero or more other deleted RRs, the post-transaction SOA\n\
+(to be added), and zero or more other added RRs. (Note, however, that\n\
+the underlying data source implementation may or may not represent the\n\
+difference in a straightforward realization of this concept. The\n\
+mapping between the conceptual difference and the actual\n\
+implementation is hidden in each derived class.)\n\
+\n\
+This method provides an application with a higher level interface to\n\
+retrieve the difference along with the conceptual model: the\n\
+ZoneJournalReader object iterates over the entire sequences from the\n\
+beginning SOA (which is to be deleted) through the added RRs up to\n\
+the ending SOA, and each call to this method returns one RR in the\n\
+form of an RRset that contains exactly one RDATA in the order of the\n\
+sequences.\n\
+\n\
+Note that the ordering of the sequences specifies the semantics of\n\
+each difference: add or delete. For example, the first RR is to be\n\
+deleted, and the last RR is to be added. So the return value of this\n\
+method does not explicitly indicate whether the RR is to be added or\n\
+deleted.\n\
+\n\
+This method ensures the returned RRset represents an RR, that is, it\n\
+contains exactly one RDATA. However, it does not necessarily ensure\n\
+that the resulting sequences are in valid IXFR style. For example,\n\
+the first RR is supposed to be an SOA, and that should normally be\n\
+the case, but this interface does not require the derived class\n\
+implementation to ensure it. Normally the differences are expected\n\
+to be stored using this API (via a ZoneUpdater object), and as long\n\
+as that is the case and the underlying implementation follows the\n\
+requirements of the API, the result of this method should be a valid\n\
+IXFR-style sequence. So this API does not mandate an almost redundant\n\
+check as part of the interface. If the application needs to be 100%\n\
+sure, it must check the resulting sequence itself.\n\
+\n\
+Once the object reaches the end of the sequences, this method returns\n\
+None. Any subsequent call will result in an exception of class\n\
+ValueError.\n\
+\n\
+Exceptions:\n\
+  ValueError The method is called beyond the end of the\n\
+             difference sequences.\n\
+  isc.datasrc.Error Underlying data is broken and the RR cannot be\n\
+             created or other low level data source error.\n\
+\n\
+Return Value(s): An RRset that contains one RDATA corresponding to the\n\
+next difference in the sequences.\n\
+";
+} // unnamed namespace
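
To make the documented end-of-sequence behaviour concrete, a small sketch of the low-level loop; the reader object is assumed to have been obtained from get_journal_reader:

    # Collect all diff RRs; None marks the end of the sequences, and one
    # more call after that would raise ValueError.
    diffs = []
    while True:
        rr = reader.get_next_diff()
        if rr is None:
            break
        diffs.append(rr)       # each returned RRset contains exactly one RDATA
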
diff --git a/src/lib/python/isc/datasrc/journal_reader_python.cc b/src/lib/python/isc/datasrc/journal_reader_python.cc
new file mode 100644
index 0000000..ff398d1
--- /dev/null
+++ b/src/lib/python/isc/datasrc/journal_reader_python.cc
@@ -0,0 +1,200 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+// Python.h needs to be placed at the head of the program file, see:
+// http://docs.python.org/py3k/extending/extending.html#a-simple-example
+#include <Python.h>
+
+#include <util/python/pycppwrapper_util.h>
+
+#include <datasrc/client.h>
+#include <datasrc/database.h>
+
+#include <dns/python/rrset_python.h>
+
+#include "datasrc.h"
+#include "journal_reader_python.h"
+
+#include "journal_reader_inc.cc"
+
+using namespace isc::util::python;
+using namespace isc::dns::python;
+using namespace isc::datasrc;
+using namespace isc::datasrc::python;
+
+namespace {
+// The s_* Class simply covers one instantiation of the object
+class s_ZoneJournalReader : public PyObject {
+public:
+    s_ZoneJournalReader() : cppobj(ZoneJournalReaderPtr()), base_obj(NULL) {};
+    ZoneJournalReaderPtr cppobj;
+    // This is a reference to a base object; if the object of this class
+    // depends on another object to be in scope during its lifetime,
+    // we INCREF the base object upon creation, and DECREF it at
+    // the end of the destructor
+    // This is an optional argument to createXXX(). If NULL, it is ignored.
+    PyObject* base_obj;
+};
+
+// General creation and destruction
+int
+ZoneJournalReader_init(PyObject*, PyObject*, PyObject*) {
+    // can't be called directly
+    PyErr_SetString(PyExc_TypeError,
+                    "ZoneJournalReader cannot be constructed directly");
+
+    return (-1);
+}
+
+void
+ZoneJournalReader_destroy(PyObject* po_self) {
+    s_ZoneJournalReader* const self =
+        static_cast<s_ZoneJournalReader*>(po_self) ;
+    // cppobj is a shared ptr, but to make sure things are not destroyed in
+    // the wrong order, we reset it here.
+    self->cppobj.reset();
+    if (self->base_obj != NULL) {
+        Py_DECREF(self->base_obj);
+    }
+    Py_TYPE(self)->tp_free(self);
+}
+
+//
+// We declare the functions here, the definitions are below
+// the type definition of the object, since both can use the other
+//
+PyObject*
+ZoneJournalReader_getNextDiff(PyObject* po_self, PyObject*) {
+    s_ZoneJournalReader* self = static_cast<s_ZoneJournalReader*>(po_self);
+    try {
+        isc::dns::ConstRRsetPtr rrset = self->cppobj->getNextDiff();
+        if (!rrset) {
+            Py_RETURN_NONE;
+        }
+        return (createRRsetObject(*rrset));
+    } catch (const isc::InvalidOperation& ex) {
+        PyErr_SetString(PyExc_ValueError, ex.what());
+        return (NULL);
+    } catch (const isc::Exception& isce) {
+        PyErr_SetString(getDataSourceException("Error"), isce.what());
+        return (NULL);
+    } catch (const std::exception& exc) {
+        PyErr_SetString(getDataSourceException("Error"), exc.what());
+        return (NULL);
+    } catch (...) {
+        PyErr_SetString(getDataSourceException("Error"),
+                        "Unexpected exception");
+        return (NULL);
+    }
+}
+
+PyObject*
+ZoneJournalReader_iter(PyObject *self) {
+    Py_INCREF(self);
+    return (self);
+}
+
+PyObject*
+ZoneJournalReader_next(PyObject* self) {
+    PyObject* result = ZoneJournalReader_getNextDiff(self, NULL);
+    // iter_next must return NULL without error instead of Py_None
+    if (result == Py_None) {
+        Py_DECREF(result);
+        return (NULL);
+    } else {
+        return (result);
+    }
+}
+
+PyMethodDef ZoneJournalReader_methods[] = {
+    { "get_next_diff", ZoneJournalReader_getNextDiff, METH_NOARGS,
+      ZoneJournalReader_getNextDiff_doc },
+    { NULL, NULL, 0, NULL }
+};
+
+
+} // end of unnamed namespace
+
+namespace isc {
+namespace datasrc {
+namespace python {
+PyTypeObject journal_reader_type = {
+    PyVarObject_HEAD_INIT(NULL, 0)
+    "datasrc.ZoneJournalReader",
+    sizeof(s_ZoneJournalReader),             // tp_basicsize
+    0,                                  // tp_itemsize
+    ZoneJournalReader_destroy,          // tp_dealloc
+    NULL,                               // tp_print
+    NULL,                               // tp_getattr
+    NULL,                               // tp_setattr
+    NULL,                               // tp_reserved
+    NULL,                               // tp_repr
+    NULL,                               // tp_as_number
+    NULL,                               // tp_as_sequence
+    NULL,                               // tp_as_mapping
+    NULL,                               // tp_hash
+    NULL,                               // tp_call
+    NULL,                               // tp_str
+    NULL,                               // tp_getattro
+    NULL,                               // tp_setattro
+    NULL,                               // tp_as_buffer
+    Py_TPFLAGS_DEFAULT,                 // tp_flags
+    ZoneJournalReader_doc,
+    NULL,                               // tp_traverse
+    NULL,                               // tp_clear
+    NULL,                               // tp_richcompare
+    0,                                  // tp_weaklistoffset
+    ZoneJournalReader_iter,                  // tp_iter
+    ZoneJournalReader_next,                  // tp_iternext
+    ZoneJournalReader_methods,               // tp_methods
+    NULL,                               // tp_members
+    NULL,                               // tp_getset
+    NULL,                               // tp_base
+    NULL,                               // tp_dict
+    NULL,                               // tp_descr_get
+    NULL,                               // tp_descr_set
+    0,                                  // tp_dictoffset
+    ZoneJournalReader_init,             // tp_init
+    NULL,                               // tp_alloc
+    PyType_GenericNew,                  // tp_new
+    NULL,                               // tp_free
+    NULL,                               // tp_is_gc
+    NULL,                               // tp_bases
+    NULL,                               // tp_mro
+    NULL,                               // tp_cache
+    NULL,                               // tp_subclasses
+    NULL,                               // tp_weaklist
+    NULL,                               // tp_del
+    0                                   // tp_version_tag
+};
+
+PyObject*
+createZoneJournalReaderObject(ZoneJournalReaderPtr source,
+                              PyObject* base_obj)
+{
+    s_ZoneJournalReader* po = static_cast<s_ZoneJournalReader*>(
+        journal_reader_type.tp_alloc(&journal_reader_type, 0));
+    if (po != NULL) {
+        po->cppobj = source;
+        po->base_obj = base_obj;
+        if (base_obj != NULL) {
+            Py_INCREF(base_obj);
+        }
+    }
+    return (po);
+}
+
+} // namespace python
+} // namespace datasrc
+} // namespace isc
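
Because the type fills in tp_iter and tp_iternext, the reader also behaves as a native Python iterator; the None that get_next_diff() returns at the end maps to the end of iteration. A minimal usage sketch (the client object is assumed as in the earlier sketches):

    from isc.dns import Name

    _, reader = client.get_journal_reader(Name("example.com"), 1234, 1235)
    for rr in reader:          # same RRs as repeated get_next_diff() calls
        print(rr.to_text())
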
diff --git a/src/lib/python/isc/datasrc/journal_reader_python.h b/src/lib/python/isc/datasrc/journal_reader_python.h
new file mode 100644
index 0000000..56344df
--- /dev/null
+++ b/src/lib/python/isc/datasrc/journal_reader_python.h
@@ -0,0 +1,47 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __PYTHON_DATASRC_JOURNAL_READER_H
+#define __PYTHON_DATASRC_JOURNAL_READER_H 1
+
+#include <Python.h>
+
+#include <datasrc/zone.h>
+
+namespace isc {
+namespace datasrc {
+namespace python {
+
+extern PyTypeObject journal_reader_type;
+
+/// \brief Create a ZoneJournalReader python object
+///
+/// \param source The zone journal reader pointer to wrap
+/// \param base_obj An optional PyObject that this ZoneJournalReader depends on
+///                 Its refcount is increased, and will be decreased when
+///                 this reader is destroyed, making sure that the
+///                 base object is never destroyed before this reader.
+PyObject* createZoneJournalReaderObject(
+    isc::datasrc::ZoneJournalReaderPtr source,
+    PyObject* base_obj = NULL);
+
+
+} // namespace python
+} // namespace datasrc
+} // namespace isc
+#endif // __PYTHON_DATASRC_JOURNAL_READER_H
+
+// Local Variables:
+// mode: c++
+// End:
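
For reference, client_python.cc passes the client object itself as base_obj, so a reader keeps its DataSourceClient alive for as long as the reader exists. A hedged sketch of what that allows (the file name is made up):

    import isc.datasrc
    from isc.dns import Name

    def open_reader(dbfile):
        # The reader INCREFs the client (base_obj), so dropping our only
        # reference to the client when this function returns is safe.
        client = isc.datasrc.DataSourceClient(
            "sqlite3", '{ "database_file": "' + dbfile + '" }')
        _, reader = client.get_journal_reader(Name("example.com"), 1234, 1235)
        return reader

    reader = open_reader("zones.sqlite3")
    # the underlying client is still referenced by 'reader' at this point
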
diff --git a/src/lib/python/isc/datasrc/sqlite3_ds.py b/src/lib/python/isc/datasrc/sqlite3_ds.py
index fd63741..daa12fc 100644
--- a/src/lib/python/isc/datasrc/sqlite3_ds.py
+++ b/src/lib/python/isc/datasrc/sqlite3_ds.py
@@ -72,6 +72,14 @@ def create(cur):
                     rdtype STRING NOT NULL COLLATE NOCASE,
                     rdata STRING NOT NULL)""")
         cur.execute("CREATE INDEX nsec3_byhash ON nsec3 (hash)")
+        cur.execute("""CREATE TABLE diffs (id INTEGER PRIMARY KEY,
+                    zone_id INTEGER NOT NULL,
+                    version INTEGER NOT NULL,
+                    operation INTEGER NOT NULL,
+                    name STRING NOT NULL COLLATE NOCASE,
+                    rrtype STRING NOT NULL COLLATE NOCASE,
+                    ttl INTEGER NOT NULL,
+                    rdata STRING NOT NULL)""")
         row = [1]
     cur.execute("COMMIT TRANSACTION")
     return row
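
For reference, the rows written to the new diffs table can be inspected with plain sqlite3. A hedged sketch (the database path and zone_id are only examples, and the encoding of the operation column is left to the data source implementation):

    import sqlite3

    conn = sqlite3.connect('zones.sqlite3')        # hypothetical database file
    cur = conn.cursor()
    cur.execute('SELECT version, operation, name, rrtype, ttl, rdata '
                'FROM diffs WHERE zone_id = ? ORDER BY id', (1,))
    for row in cur.fetchall():
        print(row)
    conn.close()
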
diff --git a/src/lib/python/isc/datasrc/tests/Makefile.am b/src/lib/python/isc/datasrc/tests/Makefile.am
index 411b5cc..400abcf 100644
--- a/src/lib/python/isc/datasrc/tests/Makefile.am
+++ b/src/lib/python/isc/datasrc/tests/Makefile.am
@@ -6,6 +6,7 @@ EXTRA_DIST = $(PYTESTS)
 
 EXTRA_DIST += testdata/brokendb.sqlite3
 EXTRA_DIST += testdata/example.com.sqlite3
+EXTRA_DIST += testdata/test.sqlite3.nodiffs
 CLEANFILES = $(abs_builddir)/rwtest.sqlite3.copied
 
 # If necessary (rare cases), explicitly specify paths to dynamic libraries
diff --git a/src/lib/python/isc/datasrc/tests/datasrc_test.py b/src/lib/python/isc/datasrc/tests/datasrc_test.py
index 02020e2..e46c177 100644
--- a/src/lib/python/isc/datasrc/tests/datasrc_test.py
+++ b/src/lib/python/isc/datasrc/tests/datasrc_test.py
@@ -15,8 +15,9 @@
 
 import isc.log
 import isc.datasrc
-from isc.datasrc import ZoneFinder
+from isc.datasrc import ZoneFinder, ZoneJournalReader
 from isc.dns import *
+from isc.testutils.rrset_utils import rrsets_equal
 import unittest
 import sqlite3
 import os
@@ -40,19 +41,6 @@ def add_rrset(rrset_list, name, rrclass, rrtype, ttl, rdatas):
             rrset_to_add.add_rdata(isc.dns.Rdata(rrtype, rrclass, rdata))
     rrset_list.append(rrset_to_add)
 
-# helper function, we have no direct rrset comparison atm
-def rrsets_equal(a, b):
-    # no accessor for sigs either (so this only checks name, class, type, ttl,
-    # and rdata)
-    # also, because of the fake data in rrsigs, if the type is rrsig, the
-    # rdata is not checked
-    return a.get_name() == b.get_name() and\
-           a.get_class() == b.get_class() and\
-           a.get_type() == b.get_type() and \
-           a.get_ttl() == b.get_ttl() and\
-           (a.get_type() == isc.dns.RRType.RRSIG() or
-            sorted(a.get_rdata()) == sorted(b.get_rdata()))
-
 # returns true if rrset is in expected_rrsets
 # will remove the rrset from expected_rrsets if found
 def check_for_rrset(expected_rrsets, rrset):
@@ -62,6 +50,13 @@ def check_for_rrset(expected_rrsets, rrset):
             return True
     return False
 
+def create_soa(serial):
+    soa = RRset(Name('example.org'), RRClass.IN(), RRType.SOA(), RRTTL(3600))
+    soa.add_rdata(Rdata(RRType.SOA(), RRClass.IN(),
+                        'ns1.example.org. admin.example.org. ' +
+                        str(serial) + ' 3600 1800 2419200 7200'))
+    return soa
+
 class DataSrcClient(unittest.TestCase):
 
     def test_(self):
@@ -606,14 +601,6 @@ class JournalWrite(unittest.TestCase):
             self.assertEqual(expected, actual)
         conn.close()
 
-    def create_soa(self, serial):
-        soa = RRset(Name('example.org'), RRClass.IN(), RRType.SOA(),
-                    RRTTL(3600))
-        soa.add_rdata(Rdata(RRType.SOA(), RRClass.IN(),
-                            'ns1.example.org. admin.example.org. ' +
-                            str(serial) + ' 3600 1800 2419200 7200'))
-        return soa
-
     def create_a(self, address):
         a_rr = RRset(Name('www.example.org'), RRClass.IN(), RRType.A(),
                      RRTTL(3600))
@@ -624,9 +611,9 @@ class JournalWrite(unittest.TestCase):
         # This is a straightforward port of the C++ 'journal' test
         # Note: we add/delete 'out of zone' data (example.org in the
         # example.com zone for convenience.
-        self.updater.delete_rrset(self.create_soa(1234))
+        self.updater.delete_rrset(create_soa(1234))
         self.updater.delete_rrset(self.create_a('192.0.2.2'))
-        self.updater.add_rrset(self.create_soa(1235))
+        self.updater.add_rrset(create_soa(1235))
         self.updater.add_rrset(self.create_a('192.0.2.2'))
         self.updater.commit()
 
@@ -645,11 +632,11 @@ class JournalWrite(unittest.TestCase):
         # This is a straightforward port of the C++ 'journalMultiple' test
         expected = []
         for i in range(1, 100):
-            self.updater.delete_rrset(self.create_soa(1234 + i - 1))
+            self.updater.delete_rrset(create_soa(1234 + i - 1))
             expected.append(("example.org.", "SOA", 3600,
                              "ns1.example.org. admin.example.org. " +
                              str(1234 + i - 1) + " 3600 1800 2419200 7200"))
-            self.updater.add_rrset(self.create_soa(1234 + i))
+            self.updater.add_rrset(create_soa(1234 + i))
             expected.append(("example.org.", "SOA", 3600,
                              "ns1.example.org. admin.example.org. " +
                              str(1234 + i) + " 3600 1800 2419200 7200"))
@@ -665,27 +652,27 @@ class JournalWrite(unittest.TestCase):
         # Add before delete
         self.updater = self.dsc.get_updater(Name("example.com"), False, True)
         self.assertRaises(isc.datasrc.Error, self.updater.add_rrset,
-                          self.create_soa(1234))
+                          create_soa(1234))
         # Add A before SOA
         self.updater = self.dsc.get_updater(Name("example.com"), False, True)
-        self.updater.delete_rrset(self.create_soa(1234))
+        self.updater.delete_rrset(create_soa(1234))
         self.assertRaises(isc.datasrc.Error, self.updater.add_rrset,
                           self.create_a('192.0.2.1'))
         # Commit before add
         self.updater = self.dsc.get_updater(Name("example.com"), False, True)
-        self.updater.delete_rrset(self.create_soa(1234))
+        self.updater.delete_rrset(create_soa(1234))
         self.assertRaises(isc.datasrc.Error, self.updater.commit)
         # Delete two SOAs
         self.updater = self.dsc.get_updater(Name("example.com"), False, True)
-        self.updater.delete_rrset(self.create_soa(1234))
+        self.updater.delete_rrset(create_soa(1234))
         self.assertRaises(isc.datasrc.Error, self.updater.delete_rrset,
-                          self.create_soa(1235))
+                          create_soa(1235))
         # Add two SOAs
         self.updater = self.dsc.get_updater(Name("example.com"), False, True)
-        self.updater.delete_rrset(self.create_soa(1234))
-        self.updater.add_rrset(self.create_soa(1235))
+        self.updater.delete_rrset(create_soa(1234))
+        self.updater.add_rrset(create_soa(1235))
         self.assertRaises(isc.datasrc.Error, self.updater.add_rrset,
-                          self.create_soa(1236))
+                          create_soa(1236))
 
     def test_journal_write_onerase(self):
         self.updater = None
@@ -700,6 +687,119 @@ class JournalWrite(unittest.TestCase):
         self.assertRaises(TypeError, dsc.get_updater, Name("example.com"),
                           1, True)
 
+class JournalRead(unittest.TestCase):
+    def setUp(self):
+        # Make a fresh copy of the writable database with all original content
+        self.zname = Name('example.com')
+        shutil.copyfile(READ_ZONE_DB_FILE, WRITE_ZONE_DB_FILE)
+        self.dsc = isc.datasrc.DataSourceClient("sqlite3",
+                                                WRITE_ZONE_DB_CONFIG)
+        self.reader = None
+
+    def tearDown(self):
+        # Some tests leave the reader in the middle of sequence, holding
+        # the lock.  Since the unittest framework keeps each test object
+        # until the end of the entire tests, we need to make sure the reader
+        # is released at the end of each test.  The client shouldn't do harm
+        # but we clean it up, too, just in case.
+        self.dsc = None
+        self.reader = None
+
+    def make_simple_diff(self, begin_soa):
+        updater = self.dsc.get_updater(self.zname, False, True)
+        updater.delete_rrset(begin_soa)
+        updater.add_rrset(create_soa(1235))
+        updater.commit()
+
+    def test_journal_reader(self):
+        # This is a straightforward port of the C++ 'journalReader' test
+        self.make_simple_diff(create_soa(1234))
+        result, self.reader = self.dsc.get_journal_reader(self.zname, 1234,
+                                                          1235)
+        self.assertEqual(ZoneJournalReader.SUCCESS, result)
+        self.assertNotEqual(None, self.reader)
+        rrsets_equal(create_soa(1234), self.reader.get_next_diff())
+        rrsets_equal(create_soa(1235), self.reader.get_next_diff())
+        self.assertEqual(None, self.reader.get_next_diff())
+        self.assertRaises(ValueError, self.reader.get_next_diff)
+
+    def test_journal_reader_with_large_serial(self):
+        # similar to the previous one, but use a very large serial to check
+        # if the python wrapper code has unexpected integer overflow
+        self.make_simple_diff(create_soa(4294967295))
+        result, self.reader = self.dsc.get_journal_reader(self.zname,
+                                                          4294967295, 1235)
+        self.assertNotEqual(None, self.reader)
+        # dump to text and compare them in case create_soa happens to have
+        # an overflow bug
+        self.assertEqual('example.org. 3600 IN SOA ns1.example.org. ' + \
+                         'admin.example.org. 4294967295 3600 1800 ' + \
+                         '2419200 7200\n',
+                         self.reader.get_next_diff().to_text())
+
+    def test_journal_reader_large_journal(self):
+        # This is a straightforward port of the C++ 'readLargeJournal' test.
+        # In this test we use the ZoneJournalReader object as a Python
+        # iterator.
+        updater = self.dsc.get_updater(self.zname, False, True)
+        expected = []
+        for i in range(0, 100):
+            rrset = create_soa(1234 + i)
+            updater.delete_rrset(rrset)
+            expected.append(rrset)
+
+            rrset = create_soa(1234 + i + 1)
+            updater.add_rrset(rrset)
+            expected.append(rrset)
+
+        updater.commit()
+        _, self.reader = self.dsc.get_journal_reader(self.zname, 1234, 1334)
+        self.assertNotEqual(None, self.reader)
+        i = 0
+        for rr in self.reader:
+            self.assertNotEqual(len(expected), i)
+            rrsets_equal(expected[i], rr)
+            i += 1
+        self.assertEqual(len(expected), i)
+
+    def test_journal_reader_no_range(self):
+        # This is a straightforward port of the C++ 'readJournalForNoRange'
+        # test
+        self.make_simple_diff(create_soa(1234))
+        result, self.reader = self.dsc.get_journal_reader(self.zname, 1200,
+                                                          1235)
+        self.assertEqual(ZoneJournalReader.NO_SUCH_VERSION, result)
+        self.assertEqual(None, self.reader)
+
+    def test_journal_reader_no_zone(self):
+        # This is a straightforward port of the C++ 'journalReaderForNXZone'
+        # test
+        result, self.reader = self.dsc.get_journal_reader(Name('nosuchzone'),
+                                                          0, 1)
+        self.assertEqual(ZoneJournalReader.NO_SUCH_ZONE, result)
+        self.assertEqual(None, self.reader)
+
+    def test_journal_reader_bad_params(self):
+        self.assertRaises(TypeError, self.dsc.get_journal_reader,
+                          'example.com.', 0, 1)
+        self.assertRaises(TypeError, self.dsc.get_journal_reader,
+                          self.zname, 'must be int', 1)
+        self.assertRaises(TypeError, self.dsc.get_journal_reader,
+                          self.zname, 0, 'must be int')
+
+    def test_journal_reader_direct_construct(self):
+        # ZoneJournalReader can only be constructed via a factory
+        self.assertRaises(TypeError, ZoneJournalReader)
+
+    def test_journal_reader_old_schema(self):
+        # The database doesn't have a "diffs" table.
+        dbfile = TESTDATA_PATH + 'test.sqlite3.nodiffs'
+        client = isc.datasrc.DataSourceClient("sqlite3",
+                                              "{ \"database_file\": \"" + \
+                                                  dbfile + "\" }")
+        self.assertRaises(isc.datasrc.Error, client.get_journal_reader,
+                          self.zname, 0, 1)
+
 if __name__ == "__main__":
     isc.log.init("bind10")
     isc.log.resetUnitTestRootLogger()
diff --git a/src/lib/python/isc/datasrc/tests/testdata/test.sqlite3.nodiffs b/src/lib/python/isc/datasrc/tests/testdata/test.sqlite3.nodiffs
new file mode 100644
index 0000000..cc8cfc3
Binary files /dev/null and b/src/lib/python/isc/datasrc/tests/testdata/test.sqlite3.nodiffs differ
diff --git a/src/lib/python/isc/log/log.cc b/src/lib/python/isc/log/log.cc
index c7112b3..2e4a28f 100644
--- a/src/lib/python/isc/log/log.cc
+++ b/src/lib/python/isc/log/log.cc
@@ -303,7 +303,8 @@ public:
 extern PyTypeObject logger_type;
 
 int
-Logger_init(LoggerWrapper* self, PyObject* args) {
+Logger_init(PyObject* po_self, PyObject* args, PyObject*) {
+    LoggerWrapper* self = static_cast<LoggerWrapper*>(po_self);
     const char* name;
     if (!PyArg_ParseTuple(args, "s", &name)) {
         return (-1);
@@ -323,7 +324,9 @@ Logger_init(LoggerWrapper* self, PyObject* args) {
 }
 
 void
-Logger_destroy(LoggerWrapper* const self) {
+//Logger_destroy(LoggerWrapper* const self) {
+Logger_destroy(PyObject* po_self) {
+    LoggerWrapper* self = static_cast<LoggerWrapper*>(po_self);
     delete self->logger_;
     self->logger_ = NULL;
     Py_TYPE(self)->tp_free(self);
@@ -351,7 +354,8 @@ severityToText(const Severity& severity) {
 }
 
 PyObject*
-Logger_getEffectiveSeverity(LoggerWrapper* self, PyObject*) {
+Logger_getEffectiveSeverity(PyObject* po_self, PyObject*) {
+    LoggerWrapper* self = static_cast<LoggerWrapper*>(po_self);
     try {
         return (Py_BuildValue("s",
                               severityToText(
@@ -368,7 +372,8 @@ Logger_getEffectiveSeverity(LoggerWrapper* self, PyObject*) {
 }
 
 PyObject*
-Logger_getEffectiveDebugLevel(LoggerWrapper* self, PyObject*) {
+Logger_getEffectiveDebugLevel(PyObject* po_self, PyObject*) {
+    LoggerWrapper* self = static_cast<LoggerWrapper*>(po_self);
     try {
         return (Py_BuildValue("i", self->logger_->getEffectiveDebugLevel()));
     }
@@ -383,7 +388,8 @@ Logger_getEffectiveDebugLevel(LoggerWrapper* self, PyObject*) {
 }
 
 PyObject*
-Logger_setSeverity(LoggerWrapper* self, PyObject* args) {
+Logger_setSeverity(PyObject* po_self, PyObject* args) {
+    LoggerWrapper* self = static_cast<LoggerWrapper*>(po_self);
     const char* severity;
     int dbgLevel = 0;
     if (!PyArg_ParseTuple(args, "z|i", &severity, &dbgLevel)) {
@@ -425,27 +431,32 @@ Logger_isLevelEnabled(LoggerWrapper* self, FPtr function) {
 }
 
 PyObject*
-Logger_isInfoEnabled(LoggerWrapper* self, PyObject*) {
+Logger_isInfoEnabled(PyObject* po_self, PyObject*) {
+    LoggerWrapper* self = static_cast<LoggerWrapper*>(po_self);
     return (Logger_isLevelEnabled(self, &Logger::isInfoEnabled));
 }
 
 PyObject*
-Logger_isWarnEnabled(LoggerWrapper* self, PyObject*) {
+Logger_isWarnEnabled(PyObject* po_self, PyObject*) {
+    LoggerWrapper* self = static_cast<LoggerWrapper*>(po_self);
     return (Logger_isLevelEnabled(self, &Logger::isWarnEnabled));
 }
 
 PyObject*
-Logger_isErrorEnabled(LoggerWrapper* self, PyObject*) {
+Logger_isErrorEnabled(PyObject* po_self, PyObject*) {
+    LoggerWrapper* self = static_cast<LoggerWrapper*>(po_self);
     return (Logger_isLevelEnabled(self, &Logger::isErrorEnabled));
 }
 
 PyObject*
-Logger_isFatalEnabled(LoggerWrapper* self, PyObject*) {
+Logger_isFatalEnabled(PyObject* po_self, PyObject*) {
+    LoggerWrapper* self = static_cast<LoggerWrapper*>(po_self);
     return (Logger_isLevelEnabled(self, &Logger::isFatalEnabled));
 }
 
 PyObject*
-Logger_isDebugEnabled(LoggerWrapper* self, PyObject* args) {
+Logger_isDebugEnabled(PyObject* po_self, PyObject* args) {
+    LoggerWrapper* self = static_cast<LoggerWrapper*>(po_self);
     int level = MIN_DEBUG_LEVEL;
     if (!PyArg_ParseTuple(args, "|i", &level)) {
         return (NULL);
@@ -470,53 +481,39 @@ Logger_isDebugEnabled(LoggerWrapper* self, PyObject* args) {
 
 string
 objectToStr(PyObject* object, bool convert) {
-    PyObject* cleanup(NULL);
+    PyObjectContainer objstr_container;
     if (convert) {
-        object = cleanup = PyObject_Str(object);
-        if (object == NULL) {
+        PyObject* text_obj = PyObject_Str(object);
+        if (text_obj == NULL) {
+            // PyObject_Str could fail for various reasons, including because
+            // the object cannot be converted to a string.  We exit with
+            // InternalError to preserve the PyErr set in PyObject_Str.
             throw InternalError();
         }
-    }
-    const char* value;
-    PyObject* tuple(Py_BuildValue("(O)", object));
-    if (tuple == NULL) {
-        if (cleanup != NULL) {
-            Py_DECREF(cleanup);
-        }
-        throw InternalError();
+        objstr_container.reset(text_obj);
+        object = objstr_container.get();
     }
 
-    if (!PyArg_ParseTuple(tuple, "s", &value)) {
-        Py_DECREF(tuple);
-        if (cleanup != NULL) {
-            Py_DECREF(cleanup);
-        }
+    PyObjectContainer tuple_container(Py_BuildValue("(O)", object));
+    const char* value;
+    if (!PyArg_ParseTuple(tuple_container.get(), "s", &value)) {
         throw InternalError();
     }
-    string result(value);
-    Py_DECREF(tuple);
-    if (cleanup != NULL) {
-        Py_DECREF(cleanup);
-    }
-    return (result);
+    return (string(value));
 }
 
 // Generic function to output the logging message. Called by the real functions.
-template<class Function>
+template <class Function>
 PyObject*
 Logger_performOutput(Function function, PyObject* args, bool dbgLevel) {
     try {
-        Py_ssize_t number(PyObject_Length(args));
+        const Py_ssize_t number(PyObject_Length(args));
         if (number < 0) {
             return (NULL);
         }
 
         // Which argument is the first to format?
-        size_t start(1);
-        if (dbgLevel) {
-            start ++;
-        }
-
+        const size_t start = dbgLevel ? 2 : 1;
         if (number < start) {
             return (PyErr_Format(PyExc_TypeError, "Too few arguments to "
                                  "logging call, at least %zu needed and %zd "
@@ -524,18 +521,10 @@ Logger_performOutput(Function function, PyObject* args, bool dbgLevel) {
         }
 
         // Extract the fixed arguments
-        PyObject *midO(PySequence_GetItem(args, start - 1));
-        if (midO == NULL) {
-            return (NULL);
-        }
-        string mid(objectToStr(midO, false));
         long dbg(0);
         if (dbgLevel) {
-            PyObject *dbgO(PySequence_GetItem(args, 0));
-            if (dbgO == NULL) {
-                return (NULL);
-            }
-            dbg = PyLong_AsLong(dbgO);
+            PyObjectContainer dbg_container(PySequence_GetItem(args, 0));
+            dbg = PyLong_AsLong(dbg_container.get());
             if (PyErr_Occurred()) {
                 return (NULL);
             }
@@ -544,16 +533,16 @@ Logger_performOutput(Function function, PyObject* args, bool dbgLevel) {
         // We create the logging message right now. If we fail to convert a
         // parameter to string, at least the part that we already did will
         // be output
+        PyObjectContainer msgid_container(PySequence_GetItem(args, start - 1));
+        const string mid(objectToStr(msgid_container.get(), false));
         Logger::Formatter formatter(function(dbg, mid.c_str()));
 
         // Now process the rest of parameters, convert each to string and put
         // into the formatter. It will print itself in the end.
         for (size_t i(start); i < number; ++ i) {
-            PyObject* param(PySequence_GetItem(args, i));
-            if (param == NULL) {
-                return (NULL);
-            }
-            formatter = formatter.arg(objectToStr(param, true));
+            PyObjectContainer param_container(PySequence_GetItem(args, i));
+            formatter = formatter.arg(objectToStr(param_container.get(),
+                                                  true));
         }
         Py_RETURN_NONE;
     }
@@ -573,72 +562,74 @@ Logger_performOutput(Function function, PyObject* args, bool dbgLevel) {
 // Now map the functions into the performOutput. I wish C++ could do
 // functional programming.
 PyObject*
-Logger_debug(LoggerWrapper* self, PyObject* args) {
+Logger_debug(PyObject* po_self, PyObject* args) {
+    LoggerWrapper* self = static_cast<LoggerWrapper*>(po_self);
     return (Logger_performOutput(bind(&Logger::debug, self->logger_, _1, _2),
                                  args, true));
 }
 
 PyObject*
-Logger_info(LoggerWrapper* self, PyObject* args) {
+Logger_info(PyObject* po_self, PyObject* args) {
+    LoggerWrapper* self = static_cast<LoggerWrapper*>(po_self);
     return (Logger_performOutput(bind(&Logger::info, self->logger_, _2),
                                  args, false));
 }
 
 PyObject*
-Logger_warn(LoggerWrapper* self, PyObject* args) {
+Logger_warn(PyObject* po_self, PyObject* args) {
+    LoggerWrapper* self = static_cast<LoggerWrapper*>(po_self);
     return (Logger_performOutput(bind(&Logger::warn, self->logger_, _2),
                                  args, false));
 }
 
 PyObject*
-Logger_error(LoggerWrapper* self, PyObject* args) {
+Logger_error(PyObject* po_self, PyObject* args) {
+    LoggerWrapper* self = static_cast<LoggerWrapper*>(po_self);
     return (Logger_performOutput(bind(&Logger::error, self->logger_, _2),
                                  args, false));
 }
 
 PyObject*
-Logger_fatal(LoggerWrapper* self, PyObject* args) {
+Logger_fatal(PyObject* po_self, PyObject* args) {
+    LoggerWrapper* self = static_cast<LoggerWrapper*>(po_self);
     return (Logger_performOutput(bind(&Logger::fatal, self->logger_, _2),
                                  args, false));
 }
 
 PyMethodDef loggerMethods[] = {
-    { "get_effective_severity",
-        reinterpret_cast<PyCFunction>(Logger_getEffectiveSeverity),
-        METH_NOARGS, "Returns the effective logging severity as string" },
-    { "get_effective_debug_level",
-        reinterpret_cast<PyCFunction>(Logger_getEffectiveDebugLevel),
-        METH_NOARGS, "Returns the current debug level." },
-    { "set_severity",
-        reinterpret_cast<PyCFunction>(Logger_setSeverity), METH_VARARGS,
+    { "get_effective_severity", Logger_getEffectiveSeverity, METH_NOARGS,
+        "Returns the effective logging severity as string" },
+    { "get_effective_debug_level", Logger_getEffectiveDebugLevel, METH_NOARGS,
+        "Returns the current debug level." },
+    { "set_severity", Logger_setSeverity, METH_VARARGS,
         "Sets the severity of a logger. The parameters are severity as a "
         "string and, optionally, a debug level (integer in range 0-99). "
         "The severity may be NULL, in which case an inherited value is taken."
     },
-    { "is_debug_enabled", reinterpret_cast<PyCFunction>(Logger_isDebugEnabled),
-        METH_VARARGS, "Returns if the logger would log debug message now. "
+    { "is_debug_enabled", Logger_isDebugEnabled, METH_VARARGS,
+      "Returns if the logger would log debug message now. "
             "You can provide a desired debug level." },
-    { "is_info_enabled", reinterpret_cast<PyCFunction>(Logger_isInfoEnabled),
-        METH_NOARGS, "Returns if the logger would log info message now." },
-    { "is_warn_enabled", reinterpret_cast<PyCFunction>(Logger_isWarnEnabled),
-        METH_NOARGS, "Returns if the logger would log warn message now." },
-    { "is_error_enabled", reinterpret_cast<PyCFunction>(Logger_isErrorEnabled),
-        METH_NOARGS, "Returns if the logger would log error message now." },
-    { "is_fatal_enabled", reinterpret_cast<PyCFunction>(Logger_isFatalEnabled),
-        METH_NOARGS, "Returns if the logger would log fatal message now." },
-    { "debug", reinterpret_cast<PyCFunction>(Logger_debug), METH_VARARGS,
+    { "is_info_enabled", Logger_isInfoEnabled, METH_NOARGS,
+      "Returns if the logger would log info message now." },
+    { "is_warn_enabled", Logger_isWarnEnabled, METH_NOARGS,
+      "Returns if the logger would log warn message now." },
+    { "is_error_enabled", Logger_isErrorEnabled, METH_NOARGS,
+      "Returns if the logger would log error message now." },
+    { "is_fatal_enabled", Logger_isFatalEnabled, METH_NOARGS,
+      "Returns if the logger would log fatal message now." },
+    { "debug", Logger_debug, METH_VARARGS,
         "Logs a debug-severity message. It takes the debug level, message ID "
         "and any number of stringifiable arguments to the message." },
-    { "info", reinterpret_cast<PyCFunction>(Logger_info), METH_VARARGS,
+    { "info", Logger_info, METH_VARARGS,
         "Logs a info-severity message. It taskes the message ID and any "
         "number of stringifiable arguments to the message." },
-    { "warn", reinterpret_cast<PyCFunction>(Logger_warn), METH_VARARGS,
+    { "warn", Logger_warn, METH_VARARGS,
         "Logs a warn-severity message. It taskes the message ID and any "
         "number of stringifiable arguments to the message." },
-    { "error", reinterpret_cast<PyCFunction>(Logger_error), METH_VARARGS,
+    { "error", Logger_error, METH_VARARGS,
         "Logs a error-severity message. It taskes the message ID and any "
         "number of stringifiable arguments to the message." },
-    { "fatal", reinterpret_cast<PyCFunction>(Logger_fatal), METH_VARARGS,
+    { "fatal", Logger_fatal, METH_VARARGS,
         "Logs a fatal-severity message. It taskes the message ID and any "
         "number of stringifiable arguments to the message." },
     { NULL, NULL, 0, NULL }
@@ -649,7 +640,7 @@ PyTypeObject logger_type = {
     "isc.log.Logger",
     sizeof(LoggerWrapper),                 // tp_basicsize
     0,                                  // tp_itemsize
-    reinterpret_cast<destructor>(Logger_destroy),       // tp_dealloc
+    Logger_destroy,                     // tp_dealloc
     NULL,                               // tp_print
     NULL,                               // tp_getattr
     NULL,                               // tp_setattr
@@ -681,7 +672,7 @@ PyTypeObject logger_type = {
     NULL,                               // tp_descr_get
     NULL,                               // tp_descr_set
     0,                                  // tp_dictoffset
-    reinterpret_cast<initproc>(Logger_init),            // tp_init
+    Logger_init,                        // tp_init
     NULL,                               // tp_alloc
     PyType_GenericNew,                  // tp_new
     NULL,                               // tp_free
@@ -718,21 +709,21 @@ PyInit_log(void) {
         return (NULL);
     }
 
-    if (PyType_Ready(&logger_type) < 0) {
-        return (NULL);
-    }
-
-    if (PyModule_AddObject(mod, "Logger",
-                           static_cast<PyObject*>(static_cast<void*>(
-                               &logger_type))) < 0) {
-        return (NULL);
-    }
-
-    // Add in the definitions of the standard debug levels.  These can then
-    // be referred to in Python through the constants log.DBGLVL_XXX.
+    // Finalize logger class and add in the definitions of the standard debug
+    // levels.  These can then be referred to in Python through the constants
+    // log.DBGLVL_XXX.
     // N.B. These should be kept in sync with the constants defined in
     // log_dbglevels.h.
     try {
+        if (PyType_Ready(&logger_type) < 0) {
+            throw InternalError();
+        }
+        void* p = &logger_type;
+        if (PyModule_AddObject(mod, "Logger",
+                               static_cast<PyObject*>(p)) < 0) {
+            throw InternalError();
+        }
+
         installClassVariable(logger_type, "DBGLVL_START_SHUT",
                              Py_BuildValue("I", DBGLVL_START_SHUT));
         installClassVariable(logger_type, "DBGLVL_COMMAND",
@@ -747,15 +738,20 @@ PyInit_log(void) {
                              Py_BuildValue("I", DBGLVL_TRACE_DETAIL));
         installClassVariable(logger_type, "DBGLVL_TRACE_DETAIL_DATA",
                              Py_BuildValue("I", DBGLVL_TRACE_DETAIL_DATA));
+    } catch (const InternalError&) {
+        Py_DECREF(mod);
+        return (NULL);
     } catch (const std::exception& ex) {
         const std::string ex_what =
             "Unexpected failure in Log initialization: " +
             std::string(ex.what());
         PyErr_SetString(PyExc_SystemError, ex_what.c_str());
+        Py_DECREF(mod);
         return (NULL);
     } catch (...) {
         PyErr_SetString(PyExc_SystemError,
                         "Unexpected failure in Log initialization");
+        Py_DECREF(mod);
         return (NULL);
     }
 
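
The Python-level calling convention is unchanged by this refactoring: debug() takes a debug level first, the other severities take a message ID followed by any stringifiable arguments, each converted via str(). A short self-contained sketch (the logger name and message ID are made up for illustration, created the same way the tests below do):

    import isc.log

    isc.log.init("example")
    MSG = isc.log.create_message('EXAMPLE_MSG', 'something happened: %1')
    logger = isc.log.Logger("example")
    logger.info(MSG, "detail")                        # %1 becomes str("detail")
    logger.debug(logger.DBGLVL_COMMAND, MSG, 42)      # debug level comes first
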
diff --git a/src/lib/python/isc/log/tests/log_test.py b/src/lib/python/isc/log/tests/log_test.py
index 8deaeae..1337654 100644
--- a/src/lib/python/isc/log/tests/log_test.py
+++ b/src/lib/python/isc/log/tests/log_test.py
@@ -17,6 +17,7 @@
 import isc.log
 import unittest
 import json
+import sys
 import bind10_config
 from isc.config.ccsession import path_search
 
@@ -89,6 +90,7 @@ class Logger(unittest.TestCase):
     def setUp(self):
         isc.log.init("root", "DEBUG", 50)
         self.sevs = ['INFO', 'WARN', 'ERROR', 'FATAL']
+        self.TEST_MSG = isc.log.create_message('TEST_MESSAGE', '%1')
 
     # Checks defaults of the logger
     def defaults(self, logger):
@@ -169,5 +171,34 @@ class Logger(unittest.TestCase):
         logger = isc.log.Logger("child")
         self.assertEqual(logger.DBGLVL_COMMAND, 10)
 
+    def test_param_reference(self):
+        """
+        Check whether passing a parameter to a logger causes a reference leak.
+        """
+        class LogParam:
+            def __str__(self):
+                return 'LogParam'
+        logger = isc.log.Logger("child")
+        param = LogParam()
+        orig_msgrefcnt = sys.getrefcount(param)
+        orig_idrefcnt = sys.getrefcount(self.TEST_MSG)
+        logger.info(self.TEST_MSG, param)
+        self.assertEqual(sys.getrefcount(self.TEST_MSG), orig_idrefcnt)
+        self.assertEqual(sys.getrefcount(param), orig_msgrefcnt)
+
+        # intentionally pass an invalid type for debug level.  It will
+        # result in TypeError.  The passed object still shouldn't leak a
+        # reference.
+        self.assertRaises(TypeError, logger.debug, param, self.TEST_MSG, param)
+        self.assertEqual(sys.getrefcount(param), orig_msgrefcnt)
+
+    def test_bad_parameter(self):
+        # a log parameter cannot be converted to a string object.
+        class LogParam:
+            def __str__(self):
+                raise ValueError("LogParam can't be converted to string")
+        logger = isc.log.Logger("child")
+        self.assertRaises(ValueError, logger.info, self.TEST_MSG, LogParam())
+
 if __name__ == '__main__':
     unittest.main()
diff --git a/src/lib/python/isc/testutils/Makefile.am b/src/lib/python/isc/testutils/Makefile.am
index 0b08257..5479d83 100644
--- a/src/lib/python/isc/testutils/Makefile.am
+++ b/src/lib/python/isc/testutils/Makefile.am
@@ -1,4 +1,4 @@
-EXTRA_DIST = __init__.py parse_args.py tsigctx_mock.py
+EXTRA_DIST = __init__.py parse_args.py tsigctx_mock.py rrset_utils.py
 
 CLEANDIRS = __pycache__
 
diff --git a/src/lib/python/isc/testutils/rrset_utils.py b/src/lib/python/isc/testutils/rrset_utils.py
new file mode 100644
index 0000000..8c22d92
--- /dev/null
+++ b/src/lib/python/isc/testutils/rrset_utils.py
@@ -0,0 +1,63 @@
+# Copyright (C) 2011  Internet Systems Consortium.
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+'''Utility functions handling DNS RRsets commonly used for tests'''
+
+from isc.dns import *
+
+def rrsets_equal(a, b):
+    '''Compare two RRsets, return True if equal, otherwise False
+
+    We provide this function as part of test utils as we have no direct rrset
+    comparison atm.  There's no accessor for sigs either (so this only checks
+    name, class, type, ttl, and rdata).
+    Also, since we often use fake data in RRSIGs, RRSIG RDATA are not checked.
+
+    '''
+    return a.get_name() == b.get_name() and \
+           a.get_class() == b.get_class() and \
+           a.get_type() == b.get_type() and \
+           a.get_ttl() == b.get_ttl() and \
+           (a.get_type() == RRType.RRSIG() or
+            sorted(a.get_rdata()) == sorted(b.get_rdata()))
+
+# The following are shortcut utilities to create an RRset of a specific
+# RR type with one RDATA.  Many of the RR parameters are common in most
+# tests, so we define default values for them for convenience.
+
+def create_a(name, address, ttl=3600):
+    rrset = RRset(name, RRClass.IN(), RRType.A(), RRTTL(ttl))
+    rrset.add_rdata(Rdata(RRType.A(), RRClass.IN(), address))
+    return rrset
+
+def create_aaaa(name, address, ttl=3600):
+    rrset = RRset(name, RRClass.IN(), RRType.AAAA(), RRTTL(ttl))
+    rrset.add_rdata(Rdata(RRType.AAAA(), RRClass.IN(), address))
+    return rrset
+
+def create_ns(nsname, name=Name('example.com'), ttl=3600):
+    '''For convenience we use a default name often used as a zone name'''
+    rrset = RRset(name, RRClass.IN(), RRType.NS(), RRTTL(ttl))
+    rrset.add_rdata(Rdata(RRType.NS(), RRClass.IN(), nsname))
+    return rrset
+
+def create_soa(serial, name=Name('example.com'), ttl=3600):
+    '''For convenience we use a default name often used as a zone name'''
+
+    rrset = RRset(name, RRClass.IN(), RRType.SOA(), RRTTL(ttl))
+    rdata_str = 'master.example.com. admin.example.com. ' + \
+        str(serial) + ' 3600 1800 2419200 7200'
+    rrset.add_rdata(Rdata(RRType.SOA(), RRClass.IN(), rdata_str))
+    return rrset
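
A quick sketch of how these helpers are meant to be used in tests (the serials, names and address are arbitrary):

    from isc.dns import Name
    from isc.testutils.rrset_utils import create_a, create_soa, rrsets_equal

    soa1 = create_soa(2011112201)                     # defaults to example.com
    soa2 = create_soa(2011112201, Name('example.org'), 1800)
    a_rr = create_a(Name('www.example.com'), '192.0.2.1')
    assert rrsets_equal(soa1, create_soa(2011112201))
    assert not rrsets_equal(soa1, soa2)               # name and TTL differ
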
diff --git a/src/lib/python/isc/xfrin/diff.py b/src/lib/python/isc/xfrin/diff.py
index a2d9a7d..38b7f39 100644
--- a/src/lib/python/isc/xfrin/diff.py
+++ b/src/lib/python/isc/xfrin/diff.py
@@ -59,7 +59,7 @@ class Diff:
     the changes to underlying data source right away, but keeps them for
     a while.
     """
-    def __init__(self, ds_client, zone, replace=False):
+    def __init__(self, ds_client, zone, replace=False, journaling=False):
         """
         Initializes the diff to a ready state. It checks the zone exists
         in the datasource and if not, NoSuchZone is raised. This also creates
@@ -67,13 +67,25 @@ class Diff:
 
         The ds_client is the datasource client containing the zone. Zone is
         isc.dns.Name object representing the name of the zone (its apex).
-        If replace is true, the content of the whole zone is wiped out before
+        If replace is True, the content of the whole zone is wiped out before
         applying the diff.
 
+        If journaling is True, the history of subsequent updates will be
+        recorded as well as the updates themselves, as long as the
+        underlying data source supports journaling.  If the data source
+        allows incoming updates but does not support journaling, the Diff
+        object will still apply the diffs, but with journaling disabled.
+
         You can also expect isc.datasrc.Error or isc.datasrc.NotImplemented
         exceptions.
         """
-        self.__updater = ds_client.get_updater(zone, replace)
+        try:
+            self.__updater = ds_client.get_updater(zone, replace, journaling)
+        except isc.datasrc.NotImplemented as ex:
+            if not journaling:
+                raise ex
+            self.__updater = ds_client.get_updater(zone, replace, False)
+            logger.info(LIBXFRIN_NO_JOURNAL, zone, ds_client)
         if self.__updater is None:
             # The no such zone case
             raise NoSuchZone("Zone " + str(zone) +
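
To illustrate the new parameter, a hedged sketch of constructing a journaling-enabled Diff; ds_client is assumed to be an isc.datasrc.DataSourceClient (or anything else providing get_updater()). If the data source accepts updates but not journaling, the same call succeeds, logs LIBXFRIN_NO_JOURNAL and proceeds with journaling disabled:

    from isc.dns import Name
    from isc.xfrin.diff import Diff

    diff = Diff(ds_client, Name('example.org'), replace=False, journaling=True)
    # changes are buffered until applied; right after creation the buffer
    # is empty:
    assert diff.get_buffer() == []
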
diff --git a/src/lib/python/isc/xfrin/libxfrin_messages.mes b/src/lib/python/isc/xfrin/libxfrin_messages.mes
index be943c8..203e31f 100644
--- a/src/lib/python/isc/xfrin/libxfrin_messages.mes
+++ b/src/lib/python/isc/xfrin/libxfrin_messages.mes
@@ -19,3 +19,13 @@
 The xfrin module received an update containing multiple rdata changes for the
 same RRset. But the TTLs of these don't match each other. As we combine them
 together, the later one gets overwritten to the earlier one in the sequence.
+
+% LIBXFRIN_NO_JOURNAL disabled journaling for updates to %1 on %2
+An attempt was made to create a Diff object with journaling enabled, but
+the underlying data source didn't support journaling (while still allowing
+updates) and so the created object has it disabled.  At a higher level this
+means that the updates will be applied to the zone but subsequent IXFR requests
+will result in a full zone transfer (i.e., an AXFR-style IXFR).  Unless the
+overhead of the full transfer is an issue this message can be ignored;
+otherwise you may want to check why journaling is not supported on the
+data source and either fix the issue or use a different type of data source.
diff --git a/src/lib/python/isc/xfrin/tests/diff_tests.py b/src/lib/python/isc/xfrin/tests/diff_tests.py
index 9fab890..9944404 100644
--- a/src/lib/python/isc/xfrin/tests/diff_tests.py
+++ b/src/lib/python/isc/xfrin/tests/diff_tests.py
@@ -15,6 +15,7 @@
 
 import isc.log
 import unittest
+import isc.datasrc
 from isc.dns import Name, RRset, RRClass, RRType, RRTTL, Rdata
 from isc.xfrin.diff import Diff, NoSuchZone
 
@@ -127,7 +128,7 @@ class DiffTest(unittest.TestCase):
         """
         return self.__rrclass
 
-    def get_updater(self, zone_name, replace):
+    def get_updater(self, zone_name, replace, journaling=False):
         """
         This one pretends this is the data source client and serves
         getting an updater.
@@ -138,11 +139,20 @@ class DiffTest(unittest.TestCase):
         # The diff should not delete the old data.
         self.assertEqual(self.__should_replace, replace)
         self.__updater_requested = True
-        # Pretend this zone doesn't exist
         if zone_name == Name('none.example.org.'):
+            # Pretend this zone doesn't exist
             return None
+
+        # If journaling is enabled, record the fact; for a special zone
+        # pretend that we don't support journaling.
+        if journaling:
+            if zone_name == Name('nodiff.example.org'):
+                raise isc.datasrc.NotImplemented('journaling not supported')
+            self.__journaling_enabled = True
         else:
-            return self
+            self.__journaling_enabled = False
+
+        return self
 
     def test_create(self):
         """
@@ -152,6 +162,8 @@ class DiffTest(unittest.TestCase):
         diff = Diff(self, Name('example.org.'))
         self.assertTrue(self.__updater_requested)
         self.assertEqual([], diff.get_buffer())
+        # By default journaling is disabled
+        self.assertFalse(self.__journaling_enabled)
 
     def test_create_nonexist(self):
         """
@@ -161,6 +173,14 @@ class DiffTest(unittest.TestCase):
         self.assertRaises(NoSuchZone, Diff, self, Name('none.example.org.'))
         self.assertTrue(self.__updater_requested)
 
+    def test_create_withjournal(self):
+        Diff(self, Name('example.org'), False, True)
+        self.assertTrue(self.__journaling_enabled)
+
+    def test_create_nojournal(self):
+        Diff(self, Name('nodiff.example.org'), False, True)
+        self.assertFalse(self.__journaling_enabled)
+
     def __data_common(self, diff, method, operation):
         """
         Common part of test for test_add and test_delte.



