BIND 10 trac1130, updated. 77030a4789285a3f08fbdd9621a384a9e008f4a8 [trac1130] Update ChangeLog file
BIND 10 source code commits
bind10-changes at lists.isc.org
Wed Aug 17 02:04:24 UTC 2011
The branch, trac1130 has been updated
via 77030a4789285a3f08fbdd9621a384a9e008f4a8 (commit)
via a030033e5a53dd18157509c6c101340688d16011 (commit)
via 485e0ba7f7fe11e4d28e3eec2be835157521a6e9 (commit)
via 6a55aa002c8f3b701dbb8291cd9a8e21534c6974 (commit)
via 7cdda20613f7ed7b18e7fe210ae0f6a87054dbf3 (commit)
via 745ebcec892cb27feec663de9218ae3647c7b8a5 (commit)
via 1e702fae4c9adbd7134a739dee28c868a15f0b3e (commit)
via 44bd4bc6dc7df56905071933a542e00e91f84837 (commit)
via 006d0fab3f44ec9caa2b23da3866bbbd841cd5d3 (commit)
via 68da925f226966a2760a193e9f9a3cdbdfcfacec (commit)
via 09e8c50958a1fca313c2be427c2991c39798f90f (commit)
via d1b580f1780e5ebdbbf6fe8655cc923fbd5c02de (commit)
via 98e74ad62b23ce33f66e3841431511136bc1c2f8 (commit)
via 0fe4f0151ae7a994aaf305e7985d4ba9f992e482 (commit)
via 9df1f04f8b1f7091ab32dcd56fb6e47e3e96d5a7 (commit)
via 691c232b2655673ac352beafc0bfba4bc966f8f8 (commit)
via 6ad78d124740f1ea18f6f93721ec6f152364e878 (commit)
via 5253640054d48f7816aa00c803f5bc593c0c12c1 (commit)
via ce052cd92cd128ea3db5a8f154bd151956c2920c (commit)
via 6dfeded7b6f2f78a2d45fa54543a5962bdc6c035 (commit)
via 810c79d6d9b8efbc12ec8e1ad727cf002f2dedc6 (commit)
via c74d3b7f393f3934bae22fc9d3a4a49e2211aadb (commit)
via e074df43e95dc002374de30503ba44e203b04788 (commit)
via b06a3e2ba1febb9e34458c5106f8d1629a191d5f (commit)
via 56af86bdab9c9700a13cc7d622653d34cbaa72f3 (commit)
via 4cbf309be8a302afe3bc041da11c24b593464157 (commit)
via b3bcd825cfb9c19a62a7db4d12717e85aca0b1e8 (commit)
via 3f5a0900a568436b011fc14b628b71bb130ae5f7 (commit)
via 5de7909a21a077238567b64e489ed5345824b2a0 (commit)
via b4a1bc9ba28398dbd5fdbe4ee4f118a2faf59efa (commit)
via 3ce7b09732207eac03998fa5e267672760e475c9 (commit)
via 004afad6ea3fba7c8dd7730428b50fd770daec66 (commit)
via f20be125d667bceea0d940fc5fabf87b2eef86cd (commit)
via fcc707041d663b98c1992cdd1402cc183155d3c0 (commit)
via da5d5926cb26ca8dbdae119c03687cd3415f6638 (commit)
via 0314c7bb66b85775dea73c95463eed88e9e286c3 (commit)
via b8cecbbd905c10d28bcb905def7160d9e406dac4 (commit)
via 7a31e95e63013a298b449573cc5336bcd64a0419 (commit)
via e18a678b62d03729f065c40650d7183e2f260b22 (commit)
via 1d1a87939a010bd16ed23cd817261e9a655bf98f (commit)
via c6948a6df9aeedd3753bc4c5e3a553088cd98f63 (commit)
via db0371fc9e5c7a85ab524ab7bc0b8169b9ba0486 (commit)
via e906efc3747f052128eef50bed0107a0d53546c8 (commit)
via d86a9dceaddf5a2cee44170e6e677f492df5e0ea (commit)
via 4c2732cbf0bb7384ed61ab3604855f143a0c6c5d (commit)
via aaffb9c83c0fe59d9c7d590c5bea559ed8876269 (commit)
via e8a22472e58bfc7df4a661d665152fe4d70454a6 (commit)
via 2c22d334a05ec1e77299a6c55252f1d1c33082af (commit)
via 8a24b9066537caf373d0cfc11dca855eb6c3e4d9 (commit)
via 7275c59de54593d3baca81345226dda2d3a19c30 (commit)
via bcf37a11b08922d69d02fa2ea1b280b2fa2c21e0 (commit)
via a142fa6302e1e0ea2ad1c9faf59d6a70a53a6489 (commit)
via ae8748f77a0261623216b1a11f9d979f555fe892 (commit)
via d0d5a67123b8009e89e84515eee4f93b37ec8497 (commit)
via a9a976d2a5871f1501018d697d3afd299ceec5da (commit)
via df9a8f921f0d20bd70c519218335357297bffa7d (commit)
via e95625332a20fb50afe43da2db0cab507efe8ebe (commit)
via 28cad73dff9dae43a38ad7dafbee406c690fb77c (commit)
via 4de3a5bdf367d87247cb9138f8929ab4798f014e (commit)
via aa108cc824539a1d32a4aa2f46f9e58171074a9e (commit)
via 691328d91b4c4d15ace467ca47a3c987a9fb52b9 (commit)
via c06463cf96ea7401325a208af8ba457e661d1cec (commit)
via c074f6e0b72c3facf6b325b17dea1ca13a2788cc (commit)
via daa1d6dd07292142d3dec5928583b0ab1da89adf (commit)
via e7b4337aeaa760947e8e7906e64077ad7aaadc66 (commit)
via 0b235902f38d611606d44661506f32baf266fdda (commit)
via c19a295eb4125b4d2a391de65972271002412258 (commit)
via 9261da8717a433cf20218af08d3642fbeffb7d4b (commit)
via d4078d52343247b07c47370b497927a3a47a4f9a (commit)
via 1aa728ddf691657611680385c920e3a7bd5fee12 (commit)
via 1768e822df82943f075ebed023b72d225b3b0216 (commit)
via 326885a3f98c49a848a67dc48db693b8bcc7b508 (commit)
via 3e0a0e157bc2a1ca7ad9efb566755ec61eedd180 (commit)
via 93a7f7d1495795b731242e270b6dc76b1ad6b0dc (commit)
via 87e410c0061df72fe69fb47c7456ae54c609b219 (commit)
via 1ddc6158f7544c95742757654863379fff847771 (commit)
via 0f787178301c7cbf59fc7c516ebe920a33e22429 (commit)
via 9b6993b6f6507fab1bc8956f727cca60c8c9243a (commit)
via 7bda7762ab9243404bbd0964908b3365cd052969 (commit)
via 7cf7ec751e4f776dbb60cd290cea4fb217173cdb (commit)
via d5ded106a85afaf695e59941bd382bca4811fe46 (commit)
via c4ef641d07c7ddfd6b86d6b5ae944ab9a30d6990 (commit)
via e443a325b31edefe9cd4da71e10497db6544468c (commit)
via cddcafd790288f5e666198effa142132b6fc43fa (commit)
via ab5085e81007711f9d18ed77f3d78f51cf37545c (commit)
via 5e621bce015d2847104303fba574989fdf0399e0 (commit)
via 7d5c3d56743fb696405f509663b3e1558fa72e25 (commit)
via 990247bfd2248be5ae4293928101eec87e1997e9 (commit)
via e9e36557849ba6b650e503841596bd31034c1936 (commit)
via b9f87e9332895be6915e2f2960a2e921375e8e7f (commit)
via 978ae99ac4aa211ba4ba960f56bb6cdd84b648ae (commit)
via 2e60562cfda15fad37550ce5996e942084131d1c (commit)
via 2f49e3eb0ddf31d601184b516b7f44ab4ea6eece (commit)
via 17a87c6bb9d16e992fadd47b11b3eb26af54ac69 (commit)
via 2cc500af0929c1f268aeb6f8480bc428af70f4c4 (commit)
via e021b84f7fc20b3e3927093ed87e9c873d33a443 (commit)
via c46b0bc28c22f2ae4b46c592f450e745774846d4 (commit)
via 7740b9810bc093a9083e8c3404afc627c8b78242 (commit)
via 62432e71ef943744fd4ca9ce216da1b0a7250573 (commit)
via 005c77dfe53b54cef92ce51d91f615eb9c2769c4 (commit)
via ce3bc8504d765ecc9b453398efb18662bd4f277a (commit)
via 94fc6d8d303053c47064c9408947cd49a8e11975 (commit)
via c5cf3cc081042fec0e2baea7cdf7f22a8a84664a (commit)
via 779e145d8f15ad9975f6ca689e6a595ea0a3de4b (commit)
via adcbbb141bdb09a6fd999f3369e15c2881f843ba (commit)
via 80014655d76e758868e8e1ed36472be9a606eb2a (commit)
via 959dc163810ac286e01d0163624f5bbad5b82c55 (commit)
via 1d74428fb7a817790c397338db92d102e2113e1c (commit)
via d5e24e94bbd581098e460fc3a0b437478340c876 (commit)
via 4cd96de7e7d4ac12c38b45efe7b3ee0ed331d3b9 (commit)
via 914fe9bc05003defeff70acb84a52e86fb9ced4c (commit)
via c6ca831b3f171da96fad75c21dffbd2bed71e297 (commit)
via 8ce8e05a403440e7f2323e9d43dca08be1cf8a94 (commit)
via 414b25d4bfa89e0609cd3c8c3a6e610681f4c929 (commit)
via f57e8133a7af31a59578ac2cd50dd20418cb8fbc (commit)
via 85a14b1daffb3a20e9e510b73d25c71ba95cc350 (commit)
via 774a56a8beeef3a73258910b12cace20443a1bcb (commit)
via f14bc0502c3c4d2ffd609b110771ca1fa752b68e (commit)
via f75d5bd488669426794d086b80568ef0a7a4afe6 (commit)
via d719b47c4131e2120305cee60395c0a88f5aca25 (commit)
via c7db1351d3b1c25bfc31ed9e7b6b491e6bcb1555 (commit)
via ac15a86eb62832cc22533bc33b802ea297666ad5 (commit)
via 0af72968bfd192fa418551ae75def455adcfbb4b (commit)
via 977f822d94c59bfd9d56373404291fc85218b1d6 (commit)
via d00042b03e1f85cd1d8ea8340d5ac72222e5123e (commit)
via 0081ce40b832f4c5abaeb0316736d772aec3f08d (commit)
via f03688da19c21b4d46761cc4ed9da981cebe43c1 (commit)
via eb8ba927115b091bb407cbc29ad2d07dfed318f1 (commit)
via b19a36e30d0d3829c68f2e0300ea1487da242af8 (commit)
via 12b3473393fb7a471fc7d928476b0ba66da145e9 (commit)
via cfd1d9e142fa2fd8b21f74de0e4a0109e0a04439 (commit)
via 5951ef6faaffcff62d9a9963260a932666e3decb (commit)
via f82dc7b09f470f79ed2bf099216fa64c76528d3b (commit)
via f6a1807c25d85a0ca762bfa276ebac4a3430e7c7 (commit)
via 65e4595c21bf9c01fb0b7da61577ae8a79d29c30 (commit)
via 9351dbcc88ccdd6aa83d72f432f19a76c031124b (commit)
via 46b961d69aff3a2e4d1cb7f3d0910bfcc66d1e19 (commit)
via 97153d16eb9ecb7281ed9dc76783091964e769dd (commit)
via 56083614ae0e8c5177786528e85d348686bf9bc2 (commit)
via c9d7e29600f7a80094bcda2c3bd87d8f07d813e9 (commit)
via 2b6bcb84a17fc98ea0ea87df65e6a77829857ecd (commit)
via cc6d6b14603924a4ef2d86dfaf758447cca6a7ff (commit)
via 69642fb8f55cb4741f977d3fbaacd5d12d742625 (commit)
via 86257c05755c8adbb19ce684546b718dd48a5ef8 (commit)
via 5f13949918d125f851bd2ba8ab092c301835d3ac (commit)
via 7e1e150e056d0dcf5a58b2a8036f47c2e5dac820 (commit)
via 15428e5a9c1bb01f5e7a04979c17ec5f1de9d1db (commit)
via ac9fd0a240cbfa8c448cb01bb69ac92313eb7e56 (commit)
via ce0544bd0852415891cb31e0c1b7d0ba0b3d19f3 (commit)
via dba1e2c7884b5bc68f945fd5d2dd500f9a258c6b (commit)
via bc281e8b48c92102d3c64318e07598c8e96e493c (commit)
via 82667b0cdd6592053f5b2f4cfa1cbd0ec92db0b2 (commit)
via 71b0ae9ddbcbf4093900ff879e2e1c82be89867f (commit)
via 1b96c2563342098e05ac4b240c66e60222249cf4 (commit)
via ff14da4f9b706a47f152491eae60586b75430c6e (commit)
via d23cde8c4285cf55b007b300123c41fa852d38d9 (commit)
via 885d7987eefb0b8b694626b0831ed93123fb8d8d (commit)
via fea1f88cd0bb5bdeefc6048b122da4328635163d (commit)
via 3702df52de21023d90052afdc54732d9ad285b39 (commit)
via e47f04584b00f6d7b5c8bf9e8ae6af9aaa6831fd (commit)
via 823e0fcf308c7f3fc88ba48070e12bd995e75392 (commit)
via 608d45610e9f499fb43d2e52eba461d489a7d45f (commit)
via e76dc86b0a01a54dab56cbf8552bd0c5fbb5b461 (commit)
via 16e52275c4c9e355cf4e448a5b17136f24324d7a (commit)
via 61029d971895738ba353841d99f4ca07ecf792b7 (commit)
via 1c8043e5b50bd47d7734397a08d5015e3672b9ad (commit)
via 9819295a58b8b40ca6d95c84f1f1de08fb0eb707 (commit)
via dc3b856b460ff380feb68cdff551f334e6db5a27 (commit)
via be9d5fe994e6a086a951e432d56e7de2af3cfd09 (commit)
via 11b8b873e7fd6722053aa224d20f29350bf2b298 (commit)
via b63b9aac20259f3612e23c7a3e977dcb48693ef1 (commit)
via 14a0766224d50d1c4c409e883cf29515dafc25f0 (commit)
via b5fbd9c942b1080aa60a48ee23da60574d1fc22f (commit)
via 63f4617b5ab99d75e98e40760ff68bb1615a84e6 (commit)
via 579fd2bf848e994ed6dcd8d1c3633f2fa62cbd28 (commit)
via 87a4f24037965ae88435ebe3f887750c500cbfde (commit)
via aa9497f4d2346e7a18cd07b9bf31dfb5832031bc (commit)
via 7b0201a4f98ee1b1288ae3b074cd1007707b6b21 (commit)
via ba7bc1e14fcf1a223a9a42ede2e9cd7d290c8b61 (commit)
via c6ef5865b3fd8e5d5fb8c891467b3722fde4d685 (commit)
via e05a3418c9d6b3f70cdb387d1f30d8ba59733f02 (commit)
via 525d9602da83a5d8ddbfc9ebda282209aa743a70 (commit)
via c6dc0f2d6f67d69d32e7f8c3c175d79f4b2ef430 (commit)
via 85b53414c2c8f70e541447ee204e004693289956 (commit)
via 6c3401b4a9fb79bdee7484e1e3c05758d1b0c0ca (commit)
via a5cf5c7b3a6ac9be60a8737f0e36a61897d32acd (commit)
via 734cae300ccd13aacec1f32b283d4d21b5de8fb5 (commit)
via 07708b4325680c4731f0d3dc24bca9da3c962d80 (commit)
via b4007e4b25d21ba3b693674ca19ead7d202b7de0 (commit)
via 688d0a641d4fa7a018fb4f9e131ed1454c68dd15 (commit)
via c136060da6a43da5db7e45b6a32da83f0f7d0820 (commit)
from c4131b7a0c4a6d666a35847f8cce3d099b7a9949 (commit)
Those revisions listed above that are new to this repository have
not appeared on any other notification email; so we list those
revisions in full, below.
- Log -----------------------------------------------------------------
commit 77030a4789285a3f08fbdd9621a384a9e008f4a8
Author: Ocean Wang <wanghaidong at cnnic.cn>
Date: Wed Aug 17 10:03:35 2011 +0800
[trac1130] Update ChangeLog file
commit a030033e5a53dd18157509c6c101340688d16011
Merge: c4131b7a0c4a6d666a35847f8cce3d099b7a9949 485e0ba7f7fe11e4d28e3eec2be835157521a6e9
Author: Ocean Wang <wanghaidong at cnnic.cn>
Date: Wed Aug 17 09:49:00 2011 +0800
Merge branch 'master' into trac1130
-----------------------------------------------------------------------
Summary of changes:
ChangeLog | 15 +-
README | 10 +-
doc/guide/bind10-guide.html | 56 +-
doc/guide/bind10-guide.xml | 877 +++++++++-
doc/guide/bind10-messages.html | 1025 ++++++++---
doc/guide/bind10-messages.xml | 1849 ++++++++++++++++----
src/bin/auth/auth.spec.pre.in | 18 +
src/bin/auth/b10-auth.8 | 47 +-
src/bin/auth/b10-auth.xml | 48 +-
src/bin/auth/query.cc | 8 +-
src/bin/auth/query.h | 8 +-
src/bin/auth/tests/query_unittest.cc | 8 +-
src/bin/bind10/bind10.xml | 28 +-
src/bin/bind10/bob.spec | 11 +
src/bin/bind10/creatorapi.txt | 123 ++
src/bin/resolver/b10-resolver.xml | 28 +-
src/bin/stats/b10-stats.xml | 122 ++-
src/bin/stats/stats-schema.spec | 3 +-
src/bin/stats/stats.spec | 45 +
src/bin/stats/tests/isc/config/ccsession.py | 89 +
src/bin/xfrin/b10-xfrin.xml | 1 +
src/bin/xfrout/b10-xfrout.xml | 8 +
src/lib/cache/cache_messages.mes | 2 +-
src/lib/cc/session.cc | 2 +-
src/lib/config/module_spec.cc | 91 +-
src/lib/config/module_spec.h | 23 +-
src/lib/config/tests/ccsession_unittests.cc | 4 +-
src/lib/config/tests/module_spec_unittests.cc | 158 ++-
src/lib/config/tests/testdata/Makefile.am | 8 +
src/lib/config/tests/testdata/data33_1.data | 7 +
src/lib/config/tests/testdata/data33_2.data | 7 +
src/lib/config/tests/testdata/spec2.spec | 11 +
src/lib/config/tests/testdata/spec33.spec | 50 +
src/lib/config/tests/testdata/spec34.spec | 14 +
src/lib/config/tests/testdata/spec35.spec | 15 +
src/lib/config/tests/testdata/spec36.spec | 17 +
src/lib/config/tests/testdata/spec37.spec | 7 +
src/lib/config/tests/testdata/spec38.spec | 17 +
src/lib/datasrc/Makefile.am | 2 +
src/lib/datasrc/client.h | 2 +
src/lib/datasrc/database.cc | 405 +++++
src/lib/datasrc/database.h | 367 ++++
src/lib/datasrc/datasrc_messages.mes | 68 +-
src/lib/datasrc/memory_datasrc.cc | 6 +-
src/lib/datasrc/memory_datasrc.h | 6 +-
src/lib/datasrc/sqlite3_accessor.cc | 412 +++++
src/lib/datasrc/sqlite3_accessor.h | 160 ++
src/lib/datasrc/tests/Makefile.am | 2 +
src/lib/datasrc/tests/database_unittest.cc | 943 ++++++++++
src/lib/datasrc/tests/sqlite3_accessor_unittest.cc | 245 +++
src/lib/datasrc/zone.h | 6 +-
src/lib/dns/Makefile.am | 2 +
src/lib/dns/rdata/generic/afsdb_18.cc | 170 ++
src/lib/dns/rdata/generic/afsdb_18.h | 74 +
src/lib/dns/rdata/generic/rrsig_46.cc | 5 +
src/lib/dns/rdata/generic/rrsig_46.h | 3 +
src/lib/dns/tests/Makefile.am | 1 +
src/lib/dns/tests/rdata_afsdb_unittest.cc | 210 +++
src/lib/dns/tests/rdata_rrsig_unittest.cc | 2 +-
src/lib/dns/tests/testdata/Makefile.am | 8 +
.../dns/tests/testdata/rdata_afsdb_fromWire1.spec | 3 +
.../dns/tests/testdata/rdata_afsdb_fromWire2.spec | 6 +
.../dns/tests/testdata/rdata_afsdb_fromWire3.spec | 4 +
.../dns/tests/testdata/rdata_afsdb_fromWire4.spec | 4 +
.../dns/tests/testdata/rdata_afsdb_fromWire5.spec | 4 +
.../dns/tests/testdata/rdata_afsdb_toWire1.spec | 4 +
.../dns/tests/testdata/rdata_afsdb_toWire2.spec | 8 +
src/lib/python/isc/config/ccsession.py | 1 +
src/lib/python/isc/config/cfgmgr.py | 15 +
src/lib/python/isc/config/module_spec.py | 111 ++-
src/lib/python/isc/config/tests/cfgmgr_test.py | 22 +
.../python/isc/config/tests/module_spec_test.py | 109 ++
src/lib/util/filename.h | 5 +
src/lib/util/python/gen_wiredata.py.in | 21 +
src/lib/util/tests/filename_unittest.cc | 15 +
75 files changed, 7469 insertions(+), 822 deletions(-)
create mode 100644 src/bin/bind10/creatorapi.txt
create mode 100644 src/lib/config/tests/testdata/data33_1.data
create mode 100644 src/lib/config/tests/testdata/data33_2.data
create mode 100644 src/lib/config/tests/testdata/spec33.spec
create mode 100644 src/lib/config/tests/testdata/spec34.spec
create mode 100644 src/lib/config/tests/testdata/spec35.spec
create mode 100644 src/lib/config/tests/testdata/spec36.spec
create mode 100644 src/lib/config/tests/testdata/spec37.spec
create mode 100644 src/lib/config/tests/testdata/spec38.spec
create mode 100644 src/lib/datasrc/database.cc
create mode 100644 src/lib/datasrc/database.h
create mode 100644 src/lib/datasrc/sqlite3_accessor.cc
create mode 100644 src/lib/datasrc/sqlite3_accessor.h
create mode 100644 src/lib/datasrc/tests/database_unittest.cc
create mode 100644 src/lib/datasrc/tests/sqlite3_accessor_unittest.cc
create mode 100644 src/lib/dns/rdata/generic/afsdb_18.cc
create mode 100644 src/lib/dns/rdata/generic/afsdb_18.h
create mode 100644 src/lib/dns/tests/rdata_afsdb_unittest.cc
create mode 100644 src/lib/dns/tests/testdata/rdata_afsdb_fromWire1.spec
create mode 100644 src/lib/dns/tests/testdata/rdata_afsdb_fromWire2.spec
create mode 100644 src/lib/dns/tests/testdata/rdata_afsdb_fromWire3.spec
create mode 100644 src/lib/dns/tests/testdata/rdata_afsdb_fromWire4.spec
create mode 100644 src/lib/dns/tests/testdata/rdata_afsdb_fromWire5.spec
create mode 100644 src/lib/dns/tests/testdata/rdata_afsdb_toWire1.spec
create mode 100644 src/lib/dns/tests/testdata/rdata_afsdb_toWire2.spec
-----------------------------------------------------------------------
diff --git a/ChangeLog b/ChangeLog
index 5a14558..8adcf12 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,5 +1,18 @@
+280. [func] ocean
+ libdns++: Implement the NAPTR rrtype according to RFC2915,
+ RFC2168 and RFC3403.
+ (Trac #1130, git a030033e5a53dd18157509c6c101340688d16011)
+
+279. [func] jerry
+ libdns++: Implement the AFSDB rrtype according to RFC1183.
+ (Trac #1114, git ce052cd92cd128ea3db5a8f154bd151956c2920c)
+
+278. [doc] jelte
+ Add logging configuration documentation to the guide.
+ (Trac #1011, git 2cc500af0929c1f268aeb6f8480bc428af70f4c4)
+
277. [func] jerry
- Implement the SRV rrtype according to RFC2782.
+ libdns++: Implement the SRV rrtype according to RFC2782.
(Trac #1128, git 5fd94aa027828c50e63ae1073d9d6708e0a9c223)
276. [func] stephen
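ChangeLog entries 279 and 280 above cover the new AFSDB and NAPTR RRTYPE support in
libdns++ (see the src/lib/dns/rdata/generic files in the summary). As a rough
illustration of how such an RDATA can be exercised, the sketch below builds an AFSDB
RDATA from its RFC 1183 presentation form via the library's generic factory and prints
it back. This is not part of the commit: the include paths and the createRdata()/toText()
usage follow the usual libdns++ conventions and are assumptions here.

    // Sketch only; assumes the generic RDATA factory from src/lib/dns/rdata.h
    // rather than calling the new generic::AFSDB class directly.
    #include <dns/rdata.h>
    #include <dns/rrclass.h>
    #include <dns/rrtype.h>

    #include <iostream>

    using namespace isc::dns;

    int main() {
        // RFC 1183 presentation format for AFSDB: "<subtype> <hostname>"
        rdata::RdataPtr afsdb = rdata::createRdata(RRType("AFSDB"),
                                                   RRClass("IN"),
                                                   "1 afsdb.example.com.");
        // Round-trip back to presentation format; expected output:
        // 1 afsdb.example.com.
        std::cout << afsdb->toText() << std::endl;
        return 0;
    }
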
diff --git a/README b/README
index a6509da..4b84a88 100644
--- a/README
+++ b/README
@@ -8,10 +8,10 @@ for serving, maintaining, and developing DNS.
BIND10-devel is new development leading up to the production
BIND 10 release. It contains prototype code and experimental
interfaces. Nevertheless it is ready to use now for testing the
-new BIND 10 infrastructure ideas. The Year 2 milestones of the
-five year plan are described here:
+new BIND 10 infrastructure ideas. The Year 3 goals of the five
+year plan are described here:
- https://bind10.isc.org/wiki/Year2Milestones
+ http://bind10.isc.org/wiki/Year3Goals
This release includes the bind10 master process, b10-msgq message
bus, b10-auth authoritative DNS server (with SQLite3 and in-memory
@@ -67,8 +67,8 @@ e.g.,
Operating-System specific tips:
- FreeBSD
- You may need to install a python binding for sqlite3 by hand. A
- sample procedure is as follows:
+ You may need to install a python binding for sqlite3 by hand.
+ A sample procedure is as follows:
- add the following to /etc/make.conf
PYTHON_VERSION=3.1
- build and install the python binding from ports, assuming the top
diff --git a/doc/guide/bind10-guide.html b/doc/guide/bind10-guide.html
index 5754cf0..94adf4a 100644
--- a/doc/guide/bind10-guide.html
+++ b/doc/guide/bind10-guide.html
@@ -1,24 +1,24 @@
-<html><head><meta http-equiv="Content-Type" content="text/html; charset=ISO-8859-1"><title>BIND 10 Guide</title><link rel="stylesheet" href="./bind10-guide.css" type="text/css"><meta name="generator" content="DocBook XSL Stylesheets V1.75.2"><meta name="description" content="BIND 10 is a Domain Name System (DNS) suite managed by Internet Systems Consortium (ISC). It includes DNS libraries and modular components for controlling authoritative and recursive DNS servers. This is the reference guide for BIND 10 version 20110519. The most up-to-date version of this document, along with other documents for BIND 10, can be found at ."></head><body bgcolor="white" text="black" link="#0000FF" vlink="#840084" alink="#0000FF"><div class="book" title="BIND 10 Guide"><div class="titlepage"><div><div><h1 class="title"><a name="id1168230298903"></a>BIND 10 Guide</h1></div><div><h2 class="subtitle">Administrator Reference for BIND 10</h2></div><div><p class="releaseinfo">This is the referenc
e guide for BIND 10 version
- 20110519.</p></div><div><p class="copyright">Copyright © 2010 Internet Systems Consortium, Inc.</p></div><div><div class="abstract" title="Abstract"><p class="title"><b>Abstract</b></p><p>BIND 10 is a Domain Name System (DNS) suite managed by
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=ISO-8859-1"><title>BIND 10 Guide</title><link rel="stylesheet" href="./bind10-guide.css" type="text/css"><meta name="generator" content="DocBook XSL Stylesheets V1.75.2"><meta name="description" content="BIND 10 is a Domain Name System (DNS) suite managed by Internet Systems Consortium (ISC). It includes DNS libraries and modular components for controlling authoritative and recursive DNS servers. This is the reference guide for BIND 10 version 20110705. The most up-to-date version of this document, along with other documents for BIND 10, can be found at ."></head><body bgcolor="white" text="black" link="#0000FF" vlink="#840084" alink="#0000FF"><div class="book" title="BIND 10 Guide"><div class="titlepage"><div><div><h1 class="title"><a name="id1168230298906"></a>BIND 10 Guide</h1></div><div><h2 class="subtitle">Administrator Reference for BIND 10</h2></div><div><p class="releaseinfo">This is the referenc
e guide for BIND 10 version
+ 20110705.</p></div><div><p class="copyright">Copyright © 2010-2011 Internet Systems Consortium, Inc.</p></div><div><div class="abstract" title="Abstract"><p class="title"><b>Abstract</b></p><p>BIND 10 is a Domain Name System (DNS) suite managed by
Internet Systems Consortium (ISC). It includes DNS libraries
and modular components for controlling authoritative and
recursive DNS servers.
</p><p>
- This is the reference guide for BIND 10 version 20110519.
+ This is the reference guide for BIND 10 version 20110705.
The most up-to-date version of this document, along with
- other documents for BIND 10, can be found at <a class="ulink" href="http://bind10.isc.org/docs" target="_top">http://bind10.isc.org/docs</a>. </p></div></div></div><hr></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="chapter"><a href="#intro">1. Introduction</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168230299038">Supported Platforms</a></span></dt><dt><span class="section"><a href="#id1168230299065">Required Software</a></span></dt><dt><span class="section"><a href="#starting_stopping">Starting and Stopping the Server</a></span></dt><dt><span class="section"><a href="#managing_once_running">Managing BIND 10</a></span></dt></dl></dd><dt><span class="chapter"><a href="#installation">2. Installation</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168230284846">Building Requirements</a></span></dt><dt><span class="section"><a href="#quickstart">Quick start</a></span></dt><dt><span class="section"><a href="#install">In
stallation from source</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168230285026">Download Tar File</a></span></dt><dt><span class="section"><a href="#id1168230285045">Retrieve from Git</a></span></dt><dt><span class="section"><a href="#id1168230285106">Configure before the build</a></span></dt><dt><span class="section"><a href="#id1168230285203">Build</a></span></dt><dt><span class="section"><a href="#id1168230285219">Install</a></span></dt><dt><span class="section"><a href="#id1168230285242">Install Hierarchy</a></span></dt></dl></dd></dl></dd><dt><span class="chapter"><a href="#bind10">3. Starting BIND10 with <span class="command"><strong>bind10</strong></span></a></span></dt><dd><dl><dt><span class="section"><a href="#start">Starting BIND 10</a></span></dt></dl></dd><dt><span class="chapter"><a href="#msgq">4. Command channel</a></span></dt><dt><span class="chapter"><a href="#cfgmgr">5. Configuration manager</a></span></dt><dt><span class="chapter"><a hr
ef="#cmdctl">6. Remote control daemon</a></span></dt><dd><dl><dt><span class="section"><a href="#cmdctl.spec">Configuration specification for b10-cmdctl</a></span></dt></dl></dd><dt><span class="chapter"><a href="#bindctl">7. Control and configure user interface</a></span></dt><dt><span class="chapter"><a href="#authserver">8. Authoritative Server</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168230285816">Server Configurations</a></span></dt><dt><span class="section"><a href="#id1168230285881">Data Source Backends</a></span></dt><dt><span class="section"><a href="#id1168230285912">Loading Master Zones Files</a></span></dt></dl></dd><dt><span class="chapter"><a href="#xfrin">9. Incoming Zone Transfers</a></span></dt><dt><span class="chapter"><a href="#xfrout">10. Outbound Zone Transfers</a></span></dt><dt><span class="chapter"><a href="#zonemgr">11. Secondary Manager</a></span></dt><dt><span class="chapter"><a href="#resolverserver">12. Recursive Name Server<
/a></span></dt><dd><dl><dt><span class="section"><a href="#id1168230286300">Forwarding</a></span></dt></dl></dd><dt><span class="chapter"><a href="#statistics">13. Statistics</a></span></dt><dt><span class="chapter"><a href="#logging">14. Logging</a></span></dt></dl></div><div class="chapter" title="Chapter 1. Introduction"><div class="titlepage"><div><div><h2 class="title"><a name="intro"></a>Chapter 1. Introduction</h2></div></div></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="section"><a href="#id1168230299038">Supported Platforms</a></span></dt><dt><span class="section"><a href="#id1168230299065">Required Software</a></span></dt><dt><span class="section"><a href="#starting_stopping">Starting and Stopping the Server</a></span></dt><dt><span class="section"><a href="#managing_once_running">Managing BIND 10</a></span></dt></dl></div><p>
+ other documents for BIND 10, can be found at <a class="ulink" href="http://bind10.isc.org/docs" target="_top">http://bind10.isc.org/docs</a>. </p></div></div></div><hr></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="chapter"><a href="#intro">1. Introduction</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168230299042">Supported Platforms</a></span></dt><dt><span class="section"><a href="#id1168230299068">Required Software</a></span></dt><dt><span class="section"><a href="#starting_stopping">Starting and Stopping the Server</a></span></dt><dt><span class="section"><a href="#managing_once_running">Managing BIND 10</a></span></dt></dl></dd><dt><span class="chapter"><a href="#installation">2. Installation</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168230284849">Building Requirements</a></span></dt><dt><span class="section"><a href="#quickstart">Quick start</a></span></dt><dt><span class="section"><a href="#install">In
stallation from source</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168230285038">Download Tar File</a></span></dt><dt><span class="section"><a href="#id1168230285058">Retrieve from Git</a></span></dt><dt><span class="section"><a href="#id1168230285118">Configure before the build</a></span></dt><dt><span class="section"><a href="#id1168230285217">Build</a></span></dt><dt><span class="section"><a href="#id1168230285232">Install</a></span></dt><dt><span class="section"><a href="#id1168230285255">Install Hierarchy</a></span></dt></dl></dd></dl></dd><dt><span class="chapter"><a href="#bind10">3. Starting BIND10 with <span class="command"><strong>bind10</strong></span></a></span></dt><dd><dl><dt><span class="section"><a href="#start">Starting BIND 10</a></span></dt></dl></dd><dt><span class="chapter"><a href="#msgq">4. Command channel</a></span></dt><dt><span class="chapter"><a href="#cfgmgr">5. Configuration manager</a></span></dt><dt><span class="chapter"><a hr
ef="#cmdctl">6. Remote control daemon</a></span></dt><dd><dl><dt><span class="section"><a href="#cmdctl.spec">Configuration specification for b10-cmdctl</a></span></dt></dl></dd><dt><span class="chapter"><a href="#bindctl">7. Control and configure user interface</a></span></dt><dt><span class="chapter"><a href="#authserver">8. Authoritative Server</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168230285830">Server Configurations</a></span></dt><dt><span class="section"><a href="#id1168230285895">Data Source Backends</a></span></dt><dt><span class="section"><a href="#id1168230285925">Loading Master Zones Files</a></span></dt></dl></dd><dt><span class="chapter"><a href="#xfrin">9. Incoming Zone Transfers</a></span></dt><dt><span class="chapter"><a href="#xfrout">10. Outbound Zone Transfers</a></span></dt><dt><span class="chapter"><a href="#zonemgr">11. Secondary Manager</a></span></dt><dt><span class="chapter"><a href="#resolverserver">12. Recursive Name Server<
/a></span></dt><dd><dl><dt><span class="section"><a href="#id1168230286381">Forwarding</a></span></dt></dl></dd><dt><span class="chapter"><a href="#statistics">13. Statistics</a></span></dt><dt><span class="chapter"><a href="#logging">14. Logging</a></span></dt></dl></div><div class="chapter" title="Chapter 1. Introduction"><div class="titlepage"><div><div><h2 class="title"><a name="intro"></a>Chapter 1. Introduction</h2></div></div></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="section"><a href="#id1168230299042">Supported Platforms</a></span></dt><dt><span class="section"><a href="#id1168230299068">Required Software</a></span></dt><dt><span class="section"><a href="#starting_stopping">Starting and Stopping the Server</a></span></dt><dt><span class="section"><a href="#managing_once_running">Managing BIND 10</a></span></dt></dl></div><p>
BIND is the popular implementation of a DNS server, developer
interfaces, and DNS tools.
BIND 10 is a rewrite of BIND 9. BIND 10 is written in C++ and Python
and provides a modular environment for serving and maintaining DNS.
</p><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>
This guide covers the experimental prototype of
- BIND 10 version 20110519.
+ BIND 10 version 20110705.
</p></div><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>
BIND 10 provides a EDNS0- and DNSSEC-capable
authoritative DNS server and a caching recursive name server
which also provides forwarding.
- </p></div><div class="section" title="Supported Platforms"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168230299038"></a>Supported Platforms</h2></div></div></div><p>
+ </p></div><div class="section" title="Supported Platforms"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168230299042"></a>Supported Platforms</h2></div></div></div><p>
BIND 10 builds have been tested on Debian GNU/Linux 5,
Ubuntu 9.10, NetBSD 5, Solaris 10, FreeBSD 7 and 8, and CentOS
Linux 5.3.
@@ -28,13 +28,15 @@
It is planned for BIND 10 to build, install and run on
Windows and standard Unix-type platforms.
- </p></div><div class="section" title="Required Software"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168230299065"></a>Required Software</h2></div></div></div><p>
+ </p></div><div class="section" title="Required Software"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168230299068"></a>Required Software</h2></div></div></div><p>
BIND 10 requires Python 3.1. Later versions may work, but Python
3.1 is the minimum version which will work.
</p><p>
BIND 10 uses the Botan crypto library for C++. It requires
- at least Botan version 1.8. To build BIND 10, install the
- Botan libraries and development include headers.
+ at least Botan version 1.8.
+ </p><p>
+ BIND 10 uses the log4cplus C++ logging library. It requires
+ at least log4cplus version 1.0.3.
</p><p>
The authoritative server requires SQLite 3.3.9 or newer.
The <span class="command"><strong>b10-xfrin</strong></span>, <span class="command"><strong>b10-xfrout</strong></span>,
@@ -136,7 +138,10 @@
and, of course, DNS. These include detailed developer
documentation and code examples.
- </p></div><div class="chapter" title="Chapter 2. Installation"><div class="titlepage"><div><div><h2 class="title"><a name="installation"></a>Chapter 2. Installation</h2></div></div></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="section"><a href="#id1168230284846">Building Requirements</a></span></dt><dt><span class="section"><a href="#quickstart">Quick start</a></span></dt><dt><span class="section"><a href="#install">Installation from source</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168230285026">Download Tar File</a></span></dt><dt><span class="section"><a href="#id1168230285045">Retrieve from Git</a></span></dt><dt><span class="section"><a href="#id1168230285106">Configure before the build</a></span></dt><dt><span class="section"><a href="#id1168230285203">Build</a></span></dt><dt><span class="section"><a href="#id1168230285219">Install</a></span></dt><dt><span class="section"><a href="#id1168230285242">Install Hierarchy<
/a></span></dt></dl></dd></dl></div><div class="section" title="Building Requirements"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168230284846"></a>Building Requirements</h2></div></div></div><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>
+ </p></div><div class="chapter" title="Chapter 2. Installation"><div class="titlepage"><div><div><h2 class="title"><a name="installation"></a>Chapter 2. Installation</h2></div></div></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="section"><a href="#id1168230284849">Building Requirements</a></span></dt><dt><span class="section"><a href="#quickstart">Quick start</a></span></dt><dt><span class="section"><a href="#install">Installation from source</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168230285038">Download Tar File</a></span></dt><dt><span class="section"><a href="#id1168230285058">Retrieve from Git</a></span></dt><dt><span class="section"><a href="#id1168230285118">Configure before the build</a></span></dt><dt><span class="section"><a href="#id1168230285217">Build</a></span></dt><dt><span class="section"><a href="#id1168230285232">Install</a></span></dt><dt><span class="section"><a href="#id1168230285255">Install Hierarchy<
/a></span></dt></dl></dd></dl></div><div class="section" title="Building Requirements"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168230284849"></a>Building Requirements</h2></div></div></div><p>
+ In addition to the run-time requirements, building BIND 10
+ from source code requires various development include headers.
+ </p><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>
Some operating systems have split their distribution packages into
a run-time and a development package. You will need to install
the development package versions, which include header files and
@@ -147,6 +152,11 @@
</p><p>
+ To build BIND 10, also install the Botan (at least version
+ 1.8) and the log4cplus (at least version 1.0.3)
+ development include headers.
+ </p><p>
+
The Python Library and Python _sqlite3 module are required to
enable the Xfrout and Xfrin support.
</p><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>
@@ -156,7 +166,7 @@
Building BIND 10 also requires a C++ compiler and
standard development headers, make, and pkg-config.
BIND 10 builds have been tested with GCC g++ 3.4.3, 4.1.2,
- 4.1.3, 4.2.1, 4.3.2, and 4.4.1.
+ 4.1.3, 4.2.1, 4.3.2, and 4.4.1; Clang++ 2.8; and Sun C++ 5.10.
</p></div><div class="section" title="Quick start"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="quickstart"></a>Quick start</h2></div></div></div><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>
This quickly covers the standard steps for installing
and deploying BIND 10 as an authoritative name server using
@@ -192,14 +202,14 @@
the Git code revision control system or as a downloadable
tar file. It may also be available in pre-compiled ready-to-use
packages from operating system vendors.
- </p><div class="section" title="Download Tar File"><div class="titlepage"><div><div><h3 class="title"><a name="id1168230285026"></a>Download Tar File</h3></div></div></div><p>
+ </p><div class="section" title="Download Tar File"><div class="titlepage"><div><div><h3 class="title"><a name="id1168230285038"></a>Download Tar File</h3></div></div></div><p>
Downloading a release tar file is the recommended method to
obtain the source code.
</p><p>
The BIND 10 releases are available as tar file downloads from
<a class="ulink" href="ftp://ftp.isc.org/isc/bind10/" target="_top">ftp://ftp.isc.org/isc/bind10/</a>.
Periodic development snapshots may also be available.
- </p></div><div class="section" title="Retrieve from Git"><div class="titlepage"><div><div><h3 class="title"><a name="id1168230285045"></a>Retrieve from Git</h3></div></div></div><p>
+ </p></div><div class="section" title="Retrieve from Git"><div class="titlepage"><div><div><h3 class="title"><a name="id1168230285058"></a>Retrieve from Git</h3></div></div></div><p>
Downloading this "bleeding edge" code is recommended only for
developers or advanced users. Using development code in a production
environment is not recommended.
@@ -233,7 +243,7 @@
<span class="command"><strong>autoheader</strong></span>,
<span class="command"><strong>automake</strong></span>,
and related commands.
- </p></div><div class="section" title="Configure before the build"><div class="titlepage"><div><div><h3 class="title"><a name="id1168230285106"></a>Configure before the build</h3></div></div></div><p>
+ </p></div><div class="section" title="Configure before the build"><div class="titlepage"><div><div><h3 class="title"><a name="id1168230285118"></a>Configure before the build</h3></div></div></div><p>
BIND 10 uses the GNU Build System to discover build environment
details.
To generate the makefiles using the defaults, simply run:
@@ -264,16 +274,16 @@
</p><p>
If the configure fails, it may be due to missing or old
dependencies.
- </p></div><div class="section" title="Build"><div class="titlepage"><div><div><h3 class="title"><a name="id1168230285203"></a>Build</h3></div></div></div><p>
+ </p></div><div class="section" title="Build"><div class="titlepage"><div><div><h3 class="title"><a name="id1168230285217"></a>Build</h3></div></div></div><p>
After the configure step is complete, to build the executables
from the C++ code and prepare the Python scripts, run:
</p><pre class="screen">$ <strong class="userinput"><code>make</code></strong></pre><p>
- </p></div><div class="section" title="Install"><div class="titlepage"><div><div><h3 class="title"><a name="id1168230285219"></a>Install</h3></div></div></div><p>
+ </p></div><div class="section" title="Install"><div class="titlepage"><div><div><h3 class="title"><a name="id1168230285232"></a>Install</h3></div></div></div><p>
To install the BIND 10 executables, support files,
and documentation, run:
</p><pre class="screen">$ <strong class="userinput"><code>make install</code></strong></pre><p>
- </p><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>The install step may require superuser privileges.</p></div></div><div class="section" title="Install Hierarchy"><div class="titlepage"><div><div><h3 class="title"><a name="id1168230285242"></a>Install Hierarchy</h3></div></div></div><p>
+ </p><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>The install step may require superuser privileges.</p></div></div><div class="section" title="Install Hierarchy"><div class="titlepage"><div><div><h3 class="title"><a name="id1168230285255"></a>Install Hierarchy</h3></div></div></div><p>
The following is the layout of the complete BIND 10 installation:
</p><div class="itemizedlist"><ul class="itemizedlist" type="disc"><li class="listitem">
<code class="filename">bin/</code> —
@@ -490,12 +500,12 @@ shutdown
the details and relays (over a <span class="command"><strong>b10-msgq</strong></span> command
channel) the configuration on to the specified module.
</p><p>
- </p></div><div class="chapter" title="Chapter 8. Authoritative Server"><div class="titlepage"><div><div><h2 class="title"><a name="authserver"></a>Chapter 8. Authoritative Server</h2></div></div></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="section"><a href="#id1168230285816">Server Configurations</a></span></dt><dt><span class="section"><a href="#id1168230285881">Data Source Backends</a></span></dt><dt><span class="section"><a href="#id1168230285912">Loading Master Zones Files</a></span></dt></dl></div><p>
+ </p></div><div class="chapter" title="Chapter 8. Authoritative Server"><div class="titlepage"><div><div><h2 class="title"><a name="authserver"></a>Chapter 8. Authoritative Server</h2></div></div></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="section"><a href="#id1168230285830">Server Configurations</a></span></dt><dt><span class="section"><a href="#id1168230285895">Data Source Backends</a></span></dt><dt><span class="section"><a href="#id1168230285925">Loading Master Zones Files</a></span></dt></dl></div><p>
The <span class="command"><strong>b10-auth</strong></span> is the authoritative DNS server.
It supports EDNS0 and DNSSEC. It supports IPv6.
Normally it is started by the <span class="command"><strong>bind10</strong></span> master
process.
- </p><div class="section" title="Server Configurations"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168230285816"></a>Server Configurations</h2></div></div></div><p>
+ </p><div class="section" title="Server Configurations"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168230285830"></a>Server Configurations</h2></div></div></div><p>
<span class="command"><strong>b10-auth</strong></span> is configured via the
<span class="command"><strong>b10-cfgmgr</strong></span> configuration manager.
The module name is <span class="quote">“<span class="quote">Auth</span>”</span>.
@@ -515,7 +525,7 @@ This may be a temporary setting until then.
</p><div class="variablelist"><dl><dt><span class="term">shutdown</span></dt><dd>Stop the authoritative DNS server.
</dd></dl></div><p>
- </p></div><div class="section" title="Data Source Backends"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168230285881"></a>Data Source Backends</h2></div></div></div><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>
+ </p></div><div class="section" title="Data Source Backends"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168230285895"></a>Data Source Backends</h2></div></div></div><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>
For the development prototype release, <span class="command"><strong>b10-auth</strong></span>
supports a SQLite3 data source backend and in-memory data source
backend.
@@ -529,7 +539,7 @@ This may be a temporary setting until then.
The default is <code class="filename">/usr/local/var/</code>.)
This data file location may be changed by defining the
<span class="quote">“<span class="quote">database_file</span>”</span> configuration.
- </p></div><div class="section" title="Loading Master Zones Files"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168230285912"></a>Loading Master Zones Files</h2></div></div></div><p>
+ </p></div><div class="section" title="Loading Master Zones Files"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168230285925"></a>Loading Master Zones Files</h2></div></div></div><p>
RFC 1035 style DNS master zone files may imported
into a BIND 10 data source by using the
<span class="command"><strong>b10-loadzone</strong></span> utility.
@@ -607,7 +617,7 @@ This may be a temporary setting until then.
</p><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>
Access control (such as allowing notifies) is not yet provided.
The primary/secondary service is not yet complete.
- </p></div></div><div class="chapter" title="Chapter 12. Recursive Name Server"><div class="titlepage"><div><div><h2 class="title"><a name="resolverserver"></a>Chapter 12. Recursive Name Server</h2></div></div></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="section"><a href="#id1168230286300">Forwarding</a></span></dt></dl></div><p>
+ </p></div></div><div class="chapter" title="Chapter 12. Recursive Name Server"><div class="titlepage"><div><div><h2 class="title"><a name="resolverserver"></a>Chapter 12. Recursive Name Server</h2></div></div></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="section"><a href="#id1168230286381">Forwarding</a></span></dt></dl></div><p>
The <span class="command"><strong>b10-resolver</strong></span> process is started by
<span class="command"><strong>bind10</strong></span>.
@@ -636,7 +646,7 @@ This may be a temporary setting until then.
> <strong class="userinput"><code>config set Resolver/listen_on [{ "address": "127.0.0.1", "port": 53 }]</code></strong>
> <strong class="userinput"><code>config commit</code></strong>
</pre><p>
- </p><div class="section" title="Forwarding"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168230286300"></a>Forwarding</h2></div></div></div><p>
+ </p><div class="section" title="Forwarding"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168230286381"></a>Forwarding</h2></div></div></div><p>
To enable forwarding, the upstream address and port must be
configured to forward queries to, such as:
diff --git a/doc/guide/bind10-guide.xml b/doc/guide/bind10-guide.xml
index 6a42182..021c593 100644
--- a/doc/guide/bind10-guide.xml
+++ b/doc/guide/bind10-guide.xml
@@ -146,7 +146,7 @@
The processes started by the <command>bind10</command>
command have names starting with "b10-", including:
</para>
-
+
<para>
<itemizedlist>
@@ -241,7 +241,7 @@
<section id="managing_once_running">
<title>Managing BIND 10</title>
-
+
<para>
Once BIND 10 is running, a few commands are used to interact
directly with the system:
@@ -280,7 +280,7 @@
<!-- TODO point to these -->
In addition, manual pages are also provided in the default installation.
</para>
-
+
<!--
bin/
bindctl*
@@ -387,7 +387,7 @@ Debian and Ubuntu:
</para>
<orderedlist>
-
+
<listitem>
<simpara>
Install required build dependencies.
@@ -471,7 +471,7 @@ Debian and Ubuntu:
Downloading a release tar file is the recommended method to
obtain the source code.
</para>
-
+
<para>
The BIND 10 releases are available as tar file downloads from
<ulink url="ftp://ftp.isc.org/isc/bind10/"/>.
@@ -547,37 +547,37 @@ Debian and Ubuntu:
<varlistentry>
<term>--prefix</term>
<listitem>
- <simpara>Define the the installation location (the
+ <simpara>Define the installation location (the
default is <filename>/usr/local/</filename>).
</simpara>
- </listitem>
+ </listitem>
</varlistentry>
<varlistentry>
<term>--with-boost-include</term>
- <listitem>
+ <listitem>
<simpara>Define the path to find the Boost headers.
</simpara>
- </listitem>
+ </listitem>
</varlistentry>
<varlistentry>
<term>--with-pythonpath</term>
- <listitem>
+ <listitem>
<simpara>Define the path to Python 3.1 if it is not in the
standard execution path.
</simpara>
- </listitem>
+ </listitem>
</varlistentry>
<varlistentry>
<term>--with-gtest</term>
- <listitem>
+ <listitem>
<simpara>Enable building the C++ Unit Tests using the
Google Tests framework. Optionally this can define the
path to the gtest header files and library.
</simpara>
- </listitem>
+ </listitem>
</varlistentry>
</variablelist>
@@ -696,13 +696,13 @@ Debian and Ubuntu:
</para>
</section>
-->
-
+
</chapter>
<chapter id="bind10">
<title>Starting BIND10 with <command>bind10</command></title>
<para>
- BIND 10 provides the <command>bind10</command> command which
+ BIND 10 provides the <command>bind10</command> command which
starts up the required processes.
<command>bind10</command>
will also restart processes that exit unexpectedly.
@@ -711,7 +711,7 @@ Debian and Ubuntu:
<para>
After starting the <command>b10-msgq</command> communications channel,
- <command>bind10</command> connects to it,
+ <command>bind10</command> connects to it,
runs the configuration manager, and reads its own configuration.
Then it starts the other modules.
</para>
@@ -742,6 +742,16 @@ Debian and Ubuntu:
get additional debugging or diagnostic output.
</para>
<!-- TODO: note it doesn't go into background -->
+
+ <note>
+ <para>
+ If the setproctitle Python module is detected at start up,
+ the process names for the Python-based daemons will be renamed
+ to better identify them instead of just <quote>python</quote>.
+ This is not needed on some operating systems.
+ </para>
+ </note>
+
</section>
</chapter>
@@ -769,7 +779,7 @@ Debian and Ubuntu:
<command>b10-msgq</command> service.
It listens on 127.0.0.1.
</para>
-
+
<!-- TODO: this is broken, see Trac #111
<para>
To select an alternate port for the <command>b10-msgq</command> to
@@ -1095,10 +1105,10 @@ since we used bind10 -->
The configuration data item is:
<variablelist>
-
+
<varlistentry>
<term>database_file</term>
- <listitem>
+ <listitem>
<simpara>This is an optional string to define the path to find
the SQLite3 database file.
<!-- TODO: -->
@@ -1120,7 +1130,7 @@ This may be a temporary setting until then.
<varlistentry>
<term>shutdown</term>
- <listitem>
+ <listitem>
<simpara>Stop the authoritative DNS server.
</simpara>
<!-- TODO: what happens when this is sent, will bind10 restart? -->
@@ -1176,7 +1186,7 @@ This may be a temporary setting until then.
<varlistentry>
<term>$INCLUDE</term>
- <listitem>
+ <listitem>
<simpara>Loads an additional zone file. This may be recursive.
</simpara>
</listitem>
@@ -1184,7 +1194,7 @@ This may be a temporary setting until then.
<varlistentry>
<term>$ORIGIN</term>
- <listitem>
+ <listitem>
<simpara>Defines the relative domain name.
</simpara>
</listitem>
@@ -1192,7 +1202,7 @@ This may be a temporary setting until then.
<varlistentry>
<term>$TTL</term>
- <listitem>
+ <listitem>
<simpara>Defines the time-to-live value used for following
records that don't include a TTL.
</simpara>
@@ -1257,7 +1267,7 @@ TODO
<note><simpara>
The current development release of BIND 10 only supports
- AXFR. (IXFR is not supported.)
+ AXFR. (IXFR is not supported.)
<!-- TODO: sqlite3 data source only? -->
@@ -1304,7 +1314,7 @@ what if a NOTIFY is sent?
<note><simpara>
The current development release of BIND 10 only supports
- AXFR. (IXFR is not supported.)
+ AXFR. (IXFR is not supported.)
Access control is not yet provided.
</simpara></note>
@@ -1392,6 +1402,67 @@ what is XfroutClient xfr_client??
<!-- TODO: later the above will have some defaults -->
<section>
+ <title>Access Control</title>
+
+ <para>
+ The <command>b10-resolver</command> daemon only accepts
+ DNS queries from the localhost (127.0.0.1 and ::1).
+ The <option>Resolver/query_acl</option> configuration may
+ be used to reject, drop, or allow specific IPs or networks.
+ This configuration list is first match.
+ </para>
+
+ <para>
+ The configuration's <option>action</option> item may be
+ set to <quote>ACCEPT</quote> to allow the incoming query,
+ <quote>REJECT</quote> to respond with a DNS REFUSED return
+ code, or <quote>DROP</quote> to ignore the query without
+ any response (such as a blackhole). For more information,
+ see the respective debugging messages: <ulink
+ url="bind10-messages.html#RESOLVER_QUERY_ACCEPTED">RESOLVER_QUERY_ACCEPTED</ulink>,
+ <ulink
+ url="bind10-messages.html#RESOLVER_QUERY_REJECTED">RESOLVER_QUERY_REJECTED</ulink>,
+ and <ulink
+url="bind10-messages.html#RESOLVER_QUERY_DROPPED">RESOLVER_QUERY_DROPPED</ulink>.
+ </para>
+
+ <para>
+ The required configuration's <option>from</option> item is set
+ to an IPv4 or IPv6 address, addresses with a network mask, or to
+ the special lowercase keywords <quote>any6</quote> (for
+ any IPv6 address) or <quote>any4</quote> (for any IPv4
+ address).
+ </para>
+
+<!-- TODO:
+/0 is for any address in that address family
+does that need any address too?
+-->
+
+ <para>
+ For example to allow the <replaceable>192.168.1.0/24</replaceable>
+ network to use your recursive name server, at the
+ <command>bindctl</command> prompt run:
+ </para>
+
+ <screen>
+> <userinput>config add Resolver/query_acl</userinput>
+> <userinput>config set Resolver/query_acl[<replaceable>2</replaceable>]/action "ACCEPT"</userinput>
+> <userinput>config set Resolver/query_acl[<replaceable>2</replaceable>]/from "<replaceable>192.168.1.0/24</replaceable>"</userinput>
+> <userinput>config commit</userinput>
+</screen>
+
+ <simpara>(Replace the <quote><replaceable>2</replaceable></quote>
+ as needed; run <quote><userinput>config show
+ Resolver/query_acl</userinput></quote> if needed.)</simpara>
+
+<!-- TODO: check this -->
+ <note><simpara>This prototype access control configuration
+ syntax may be changed.</simpara></note>
+
+ </section>
+
+ <section>
<title>Forwarding</title>
<para>
@@ -1470,61 +1541,679 @@ then change those defaults with config set Resolver/forward_addresses[0]/address
<chapter id="logging">
<title>Logging</title>
-<!-- TODO: how to configure logging, logging destinations etc. -->
+ <section>
+ <title>Logging configuration</title>
- <para>
- Each message written by BIND 10 to the configured logging destinations
- comprises a number of components that identify the origin of the
- message and, if the message indicates a problem, information about the
- problem that may be useful in fixing it.
- </para>
+ <para>
- <para>
- Consider the message below logged to a file:
- <screen>2011-06-15 13:48:22.034 ERROR [b10-resolver.asiolink]
- ASIODNS_OPENSOCK error 111 opening TCP socket to 127.0.0.1(53)</screen>
- </para>
+ The logging system in BIND 10 is configured through the
+ Logging module. All BIND 10 modules will look at the
+ configuration in Logging to see what should be logged and
+ to where.
- <para>
- Note: the layout of messages written to the system logging
- file (syslog) may be slightly different. This message has
- been split across two lines here for display reasons; in the
- logging file, it will appear on one line.)
- </para>
+<!-- TODO: what is context of Logging module for readers of this guide? -->
- <para>
- The log message comprises a number of components:
+ </para>
+
+ <section>
+ <title>Loggers</title>
+
+ <para>
+
+ Within BIND 10, a message is logged through a component
+ called a "logger". Different parts of BIND 10 log messages
+ through different loggers, and each logger can be configured
+ independently of one another.
+
+ </para>
+
+ <para>
+
+ In the Logging module, you can specify the configuration
+ for zero or more loggers; any that are not specified will
+ take appropriate default values.
+
+ </para>
+
+ <para>
+
+ The three most important elements of a logger configuration
+ are the <option>name</option> (the component that is
+ generating the messages), the <option>severity</option>
+ (what to log), and the <option>output_options</option>
+ (where to log).
+
+ </para>
+
+ <section>
+ <title>name (string)</title>
+
+ <para>
+ Each logger in the system has a name, the name being that
+ of the component using it to log messages. For instance,
+ if you want to configure logging for the resolver module,
+ you add an entry for a logger named <quote>Resolver</quote>. This
+ configuration will then be used by the loggers in the
+ Resolver module, and all the libraries used by it.
+ </para>
+
+<!-- TODO: later we will have a way to know names of all modules
+
+Right now you can only see what their names are if they are running
+(a simple 'help' without anything else in bindctl for instance).
+
+ -->
+
+ <para>
+
+ If you want to specify logging for one specific library
+ within the module, you set the name to
+ <replaceable>module.library</replaceable>. For example, the
+ logger used by the nameserver address store component
+ has the full name of <quote>Resolver.nsas</quote>. If
+ there is no entry in Logging for a particular library,
+ it will use the configuration given for the module.
+
+<!-- TODO: how to know these specific names?
+
+We will either have to document them or tell the administrator to
+specify module-wide logging and see what appears...
+
+-->
+
+ </para>
+
+ <para>
+
+<!-- TODO: severity has not been covered yet -->
+
+ To illustrate this, suppose you want the cache library
+ to log messages of severity DEBUG, and the rest of the
+ resolver code to log messages of severity INFO. To achieve
+ this you specify two loggers, one with the name
+ <quote>Resolver</quote> and severity INFO, and one with
+ the name <quote>Resolver.cache</quote> with severity
+ DEBUG. As there are no entries for other libraries (e.g.
+ the nsas), they will use the configuration for the module
+ (<quote>Resolver</quote>), so giving the desired behavior.
+
+ </para>
+
+ <para>
+
+ One special case is that of a module name of <quote>*</quote>
+ (asterisks), which is interpreted as <emphasis>any</emphasis>
+ module. You can set global logging options by using this,
+ including setting the logging configuration for a library
+ that is used by multiple modules (e.g. <quote>*.config</quote>
+ specifies the configuration library code in whatever
+ module is using it).
+
+ </para>
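+
+          <para>
+            As a sketch, a logger covering the configuration library
+            in every module could be set up as follows (assuming this
+            is the first logger entry you create):
+          </para>
+
+          <para>
+            <screen>> <userinput>config add Logging/loggers</userinput>
+> <userinput>config set Logging/loggers[0]/name *.config</userinput>
+> <userinput>config commit</userinput>
+</screen>
+          </para>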
+
+ <para>
+
+ If there are multiple logger specifications in the
+ configuration that might match a particular logger, the
+ specification with the more specific logger name takes
+            precedence. For example, if there are entries for
+ both <quote>*</quote> and <quote>Resolver</quote>, the
+ resolver module — and all libraries it uses —
+ will log messages according to the configuration in the
+ second entry (<quote>Resolver</quote>). All other modules
+ will use the configuration of the first entry
+ (<quote>*</quote>). If there was also a configuration
+ entry for <quote>Resolver.cache</quote>, the cache library
+ within the resolver would use that in preference to the
+ entry for <quote>Resolver</quote>.
+
+ </para>
+
+ <para>
+
+ One final note about the naming. When specifying the
+ module name within a logger, use the name of the module
+ as specified in <command>bindctl</command>, e.g.
+ <quote>Resolver</quote> for the resolver module,
+ <quote>Xfrout</quote> for the xfrout module, etc. When
+ the message is logged, the message will include the name
+ of the logger generating the message, but with the module
+ name replaced by the name of the process implementing
+ the module (so for example, a message generated by the
+ <quote>Auth.cache</quote> logger will appear in the output
+ with a logger name of <quote>b10-auth.cache</quote>).
+
+ </para>
+
+ </section>
+
+ <section>
+ <title>severity (string)</title>
+
+ <para>
+
+            This specifies the severity of the messages to be logged.
+ Each message is logged with an associated severity which
+ may be one of the following (in descending order of
+ severity):
+ </para>
+
+ <itemizedlist>
+ <listitem>
+ <simpara> FATAL </simpara>
+ </listitem>
+
+ <listitem>
+ <simpara> ERROR </simpara>
+ </listitem>
+
+ <listitem>
+ <simpara> WARN </simpara>
+ </listitem>
+
+ <listitem>
+ <simpara> INFO </simpara>
+ </listitem>
+
+ <listitem>
+ <simpara> DEBUG </simpara>
+ </listitem>
+ </itemizedlist>
+
+ <para>
+
+            When the severity of a logger is set to one of these
+            values, it will only log messages of that severity and
+            of the more severe levels listed above it. The severity
+            may also be set to
+ NONE, in which case all messages from that logger are
+ inhibited.
+
+<!-- TODO: worded wrong? If I set to INFO, why would it show DEBUG which is literally below in that list? -->
+
+ </para>
+
+ </section>
+
+ <section>
+ <title>output_options (list)</title>
+
+ <para>
+
+ Each logger can have zero or more
+            <option>output_options</option>. These specify where log
+            messages are sent. They are explained in detail below.
+
+ </para>
+
+ <para>
+
+ The other options for a logger are:
+
+ </para>
+
+ </section>
+
+ <section>
+ <title>debuglevel (integer)</title>
+
+ <para>
+
+ When a logger's severity is set to DEBUG, this value
+ specifies what debug messages should be printed. It ranges
+ from 0 (least verbose) to 99 (most verbose).
+ </para>
+
+
+<!-- TODO: complete this sentence:
+
+ The general classification of debug message types is
+
+TODO; there's a ticket to determine these levels, see #1074
+
+ -->
+
+ <para>
+
+ If severity for the logger is not DEBUG, this value is ignored.
+
+ </para>
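+
+          <para>
+            For example, assuming the logger of interest is the first
+            entry in the loggers list, a moderately verbose debug
+            setting could be entered as follows (the level of 30 is
+            purely illustrative):
+          </para>
+
+          <para>
+            <screen>> <userinput>config set Logging/loggers[0]/severity DEBUG</userinput>
+> <userinput>config set Logging/loggers[0]/debuglevel 30</userinput>
+> <userinput>config commit</userinput>
+</screen>
+          </para>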
+
+ </section>
+
+ <section>
+ <title>additive (true or false)</title>
+
+ <para>
+
+ If this is true, the <option>output_options</option> from
+            the parent will be used. For example, if there are two
+            loggers configured, <quote>Resolver</quote> and
+            <quote>Resolver.cache</quote>, and <option>additive</option>
+            is true in the second, it will write the log messages
+            not only to the destinations specified for
+            <quote>Resolver.cache</quote>, but also to the destinations
+            specified in the <option>output_options</option> of
+            the logger named <quote>Resolver</quote>.
+
+<!-- TODO: check this -->
+
+ </para>
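+
+          <para>
+            Assuming <quote>Resolver.cache</quote> is the second entry
+            in the loggers list, this behavior could be switched on
+            with:
+          </para>
+
+          <para>
+            <screen>> <userinput>config set Logging/loggers[1]/additive true</userinput>
+> <userinput>config commit</userinput>
+</screen>
+          </para>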
+
+ </section>
+
+ </section>
+
+ <section>
+ <title>Output Options</title>
+
+ <para>
+
+ The main settings for an output option are the
+ <option>destination</option> and a value called
+ <option>output</option>, the meaning of which depends on
+ the destination that is set.
+
+ </para>
+
+ <section>
+ <title>destination (string)</title>
+
+ <para>
+
+ The destination is the type of output. It can be one of:
+
+ </para>
+
+ <itemizedlist>
+
+ <listitem>
+ <simpara> console </simpara>
+ </listitem>
+
+ <listitem>
+ <simpara> file </simpara>
+ </listitem>
+
+ <listitem>
+ <simpara> syslog </simpara>
+ </listitem>
+
+ </itemizedlist>
+
+ </section>
+
+ <section>
+ <title>output (string)</title>
+
+ <para>
+
+ Depending on what is set as the output destination, this
+ value is interpreted as follows:
+
+ </para>
<variablelist>
- <varlistentry>
- <term>2011-06-15 13:48:22.034</term>
- <listitem><para>
- The date and time at which the message was generated.
- </para></listitem>
- </varlistentry>
-
- <varlistentry>
- <term>ERROR</term>
- <listitem><para>
- The severity of the message.
- </para></listitem>
- </varlistentry>
-
- <varlistentry>
- <term>[b10-resolver.asiolink]</term>
- <listitem><para>
- The source of the message. This comprises two components:
- the BIND 10 process generating the message (in this
- case, <command>b10-resolver</command>) and the module
- within the program from which the message originated
- (which in the example is the asynchronous I/O link
- module, asiolink).
- </para></listitem>
- </varlistentry>
-
- <varlistentry>
- <term>ASIODNS_OPENSOCK</term>
- <listitem><para>
+
+ <varlistentry>
+ <term><option>destination</option> is <quote>console</quote></term>
+ <listitem>
+ <simpara>
+ The value of output must be one of <quote>stdout</quote>
+ (messages printed to standard output) or
+ <quote>stderr</quote> (messages printed to standard
+ error).
+ </simpara>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term><option>destination</option> is <quote>file</quote></term>
+ <listitem>
+ <simpara>
+ The value of output is interpreted as a file name;
+ log messages will be appended to this file.
+ </simpara>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term><option>destination</option> is <quote>syslog</quote></term>
+ <listitem>
+ <simpara>
+ The value of output is interpreted as the
+ <command>syslog</command> facility (e.g.
+ <emphasis>local0</emphasis>) that should be used
+ for log messages.
+ </simpara>
+ </listitem>
+ </varlistentry>
+
+ </variablelist>
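+
+        <para>
+          As a sketch, to send a logger's messages to
+          <command>syslog</command> using the
+          <emphasis>local0</emphasis> facility (assuming the logger
+          already has an output option entry at index 0), one could
+          set:
+        </para>
+
+        <para>
+          <screen>> <userinput>config set Logging/loggers[0]/output_options[0]/destination syslog</userinput>
+> <userinput>config set Logging/loggers[0]/output_options[0]/output local0</userinput>
+> <userinput>config commit</userinput>
+</screen>
+        </para>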
+
+ <para>
+
+ The other options for <option>output_options</option> are:
+
+ </para>
+
+ <section>
+          <title>flush (true or false)</title>
+
+ <para>
+ Flush buffers after each log message. Doing this will
+ reduce performance but will ensure that if the program
+ terminates abnormally, all messages up to the point of
+ termination are output.
+ </para>
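+
+          <para>
+            For example, flushing could be turned on for the first
+            output option of the first configured logger with:
+          </para>
+
+          <para>
+            <screen>> <userinput>config set Logging/loggers[0]/output_options[0]/flush true</userinput>
+> <userinput>config commit</userinput>
+</screen>
+          </para>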
+
+ </section>
+
+ <section>
+ <title>maxsize (integer)</title>
+
+ <para>
+            Only relevant when the destination is <quote>file</quote>,
+            this is the maximum size in bytes of an output file. When
+            the maximum size is reached, the file is renamed and a new
+            file opened.
+ (For example, a ".1" is appended to the name —
+ if a ".1" file exists, it is renamed ".2",
+ etc.)
+ </para>
+
+ <para>
+ If this is 0, no maximum file size is used.
+ </para>
+
+ </section>
+
+ <section>
+ <title>maxver (integer)</title>
+
+ <para>
+ Maximum number of old log files to keep around when
+ rolling the output file. Only relevant when
+ <option>destination</option> is <quote>file</quote>.
+ </para>
+
+ </section>
+
+ </section>
+
+ </section>
+
+ <section>
+ <title>Example session</title>
+
+ <para>
+
+          In this example we want to set the global logging to
+          write to the file <filename>/var/log/bind10.log</filename>,
+          at severity WARN. We want the authoritative server to
+          log at DEBUG with debuglevel 40, to a different file
+          (<filename>/tmp/auth_debug.log</filename>).
+
+ </para>
+
+ <para>
+
+ Start <command>bindctl</command>.
+
+ </para>
+
+ <para>
+
+ <screen>["login success "]
+> <userinput>config show Logging</userinput>
+Logging/loggers [] list
+</screen>
+
+ </para>
+
+ <para>
+
+ By default, no specific loggers are configured, in which
+ case the severity defaults to INFO and the output is
+ written to stderr.
+
+ </para>
+
+ <para>
+
+ Let's first add a default logger:
+
+ </para>
+
+<!-- TODO: adding the empty loggers makes no sense -->
+ <para>
+
+          <screen>> <userinput>config add Logging/loggers</userinput>
+> <userinput>config show Logging</userinput>
+Logging/loggers/ list (modified)
+</screen>
+
+ </para>
+
+ <para>
+
+ The loggers value line changed to indicate that it is no
+ longer an empty list:
+
+ </para>
+
+ <para>
+
+ <screen>> <userinput>config show Logging/loggers</userinput>
+Logging/loggers[0]/name "" string (default)
+Logging/loggers[0]/severity "INFO" string (default)
+Logging/loggers[0]/debuglevel 0 integer (default)
+Logging/loggers[0]/additive false boolean (default)
+Logging/loggers[0]/output_options [] list (default)
+</screen>
+
+ </para>
+
+ <para>
+
+          The name is mandatory, so we must set it. We will also
+          change the severity. Let's start with the global
+          logger.
+
+ </para>
+
+ <para>
+
+ <screen>> <userinput>config set Logging/loggers[0]/name *</userinput>
+> <userinput>config set Logging/loggers[0]/severity WARN</userinput>
+> <userinput>config show Logging/loggers</userinput>
+Logging/loggers[0]/name "*" string (modified)
+Logging/loggers[0]/severity "WARN" string (modified)
+Logging/loggers[0]/debuglevel 0 integer (default)
+Logging/loggers[0]/additive false boolean (default)
+Logging/loggers[0]/output_options [] list (default)
+</screen>
+
+ </para>
+
+ <para>
+
+ Of course, we need to specify where we want the log
+ messages to go, so we add an entry for an output option.
+
+ </para>
+
+ <para>
+
+ <screen>> <userinput> config add Logging/loggers[0]/output_options</userinput>
+> <userinput> config show Logging/loggers[0]/output_options</userinput>
+Logging/loggers[0]/output_options[0]/destination "console" string (default)
+Logging/loggers[0]/output_options[0]/output "stdout" string (default)
+Logging/loggers[0]/output_options[0]/flush false boolean (default)
+Logging/loggers[0]/output_options[0]/maxsize 0 integer (default)
+Logging/loggers[0]/output_options[0]/maxver 0 integer (default)
+</screen>
+
+
+ </para>
+
+ <para>
+
+ These aren't the values we are looking for.
+
+ </para>
+
+ <para>
+
+ <screen>> <userinput> config set Logging/loggers[0]/output_options[0]/destination file</userinput>
+> <userinput> config set Logging/loggers[0]/output_options[0]/output /var/log/bind10.log</userinput>
+> <userinput> config set Logging/loggers[0]/output_options[0]/maxsize 30000</userinput>
+> <userinput> config set Logging/loggers[0]/output_options[0]/maxver 8</userinput>
+</screen>
+
+ </para>
+
+ <para>
+
+          This would make the entire configuration for this logger
+          look like:
+
+ </para>
+
+ <para>
+
+ <screen>> <userinput> config show all Logging/loggers</userinput>
+Logging/loggers[0]/name "*" string (modified)
+Logging/loggers[0]/severity "WARN" string (modified)
+Logging/loggers[0]/debuglevel 0 integer (default)
+Logging/loggers[0]/additive false boolean (default)
+Logging/loggers[0]/output_options[0]/destination "file" string (modified)
+Logging/loggers[0]/output_options[0]/output "/var/log/bind10.log" string (modified)
+Logging/loggers[0]/output_options[0]/flush false boolean (default)
+Logging/loggers[0]/output_options[0]/maxsize 30000 integer (modified)
+Logging/loggers[0]/output_options[0]/maxver 8 integer (modified)
+</screen>
+
+ </para>
+
+ <para>
+
+ That looks OK, so let's commit it before we add the
+ configuration for the authoritative server's logger.
+
+ </para>
+
+ <para>
+
+ <screen>> <userinput> config commit</userinput></screen>
+
+ </para>
+
+ <para>
+
+ Now that we have set it, and checked each value along
+ the way, adding a second entry is quite similar.
+
+ </para>
+
+ <para>
+
+ <screen>> <userinput> config add Logging/loggers</userinput>
+> <userinput> config set Logging/loggers[1]/name Auth</userinput>
+> <userinput> config set Logging/loggers[1]/severity DEBUG</userinput>
+> <userinput> config set Logging/loggers[1]/debuglevel 40</userinput>
+> <userinput> config add Logging/loggers[1]/output_options</userinput>
+> <userinput> config set Logging/loggers[1]/output_options[0]/destination file</userinput>
+> <userinput> config set Logging/loggers[1]/output_options[0]/output /tmp/auth_debug.log</userinput>
+> <userinput> config commit</userinput>
+</screen>
+
+ </para>
+
+ <para>
+
+ And that's it. Once we have found whatever it was we
+ needed the debug messages for, we can simply remove the
+ second logger to let the authoritative server use the
+ same settings as the rest.
+
+ </para>
+
+ <para>
+
+ <screen>> <userinput> config remove Logging/loggers[1]</userinput>
+> <userinput> config commit</userinput>
+</screen>
+
+ </para>
+
+ <para>
+
+ And every module will now be using the values from the
+ logger named <quote>*</quote>.
+
+ </para>
+
+ </section>
+
+ </section>
+
+ <section>
+ <title>Logging Message Format</title>
+
+ <para>
+ Each message written by BIND 10 to the configured logging
+ destinations comprises a number of components that identify
+ the origin of the message and, if the message indicates
+ a problem, information about the problem that may be
+ useful in fixing it.
+ </para>
+
+ <para>
+ Consider the message below logged to a file:
+ <screen>2011-06-15 13:48:22.034 ERROR [b10-resolver.asiolink]
+ ASIODNS_OPENSOCK error 111 opening TCP socket to 127.0.0.1(53)</screen>
+ </para>
+
+ <para>
+ Note: the layout of messages written to the system logging
+ file (syslog) may be slightly different. This message has
+ been split across two lines here for display reasons; in the
+            logging file, it will appear on one line.
+ </para>
+
+ <para>
+ The log message comprises a number of components:
+
+ <variablelist>
+ <varlistentry>
+ <term>2011-06-15 13:48:22.034</term>
+<!-- TODO: timestamp repeated even if using syslog? -->
+ <listitem><para>
+ The date and time at which the message was generated.
+ </para></listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term>ERROR</term>
+ <listitem><para>
+ The severity of the message.
+ </para></listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term>[b10-resolver.asiolink]</term>
+ <listitem><para>
+ The source of the message. This comprises two components:
+ the BIND 10 process generating the message (in this
+ case, <command>b10-resolver</command>) and the module
+ within the program from which the message originated
+ (which in the example is the asynchronous I/O link
+ module, asiolink).
+ </para></listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term>ASIODNS_OPENSOCK</term>
+ <listitem><para>
The message identification. Every message in BIND 10
has a unique identification, which can be used as an
index into the <ulink
@@ -1532,25 +2221,29 @@ then change those defaults with config set Resolver/forward_addresses[0]/address
Manual</citetitle></ulink> (<ulink
url="http://bind10.isc.org/docs/bind10-messages.html"
/>) from which more information can be obtained.
- </para></listitem>
- </varlistentry>
-
- <varlistentry>
- <term>error 111 opening TCP socket to 127.0.0.1(53)</term>
- <listitem><para>
- A brief description of the cause of the problem. Within this text,
- information relating to the condition that caused the message to
- be logged will be included. In this example, error number 111
- (an operating system-specific error number) was encountered when
- trying to open a TCP connection to port 53 on the local system
- (address 127.0.0.1). The next step would be to find out the reason
- for the failure by consulting your system's documentation to
- identify what error number 111 means.
- </para></listitem>
- </varlistentry>
- </variablelist>
+ </para></listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term>error 111 opening TCP socket to 127.0.0.1(53)</term>
+ <listitem><para>
+ A brief description of the cause of the problem.
+ Within this text, information relating to the condition
+ that caused the message to be logged will be included.
+ In this example, error number 111 (an operating
+ system-specific error number) was encountered when
+ trying to open a TCP connection to port 53 on the
+ local system (address 127.0.0.1). The next step
+ would be to find out the reason for the failure by
+ consulting your system's documentation to identify
+ what error number 111 means.
+ </para></listitem>
+ </varlistentry>
+ </variablelist>
+ </para>
+
+ </section>
- </para>
</chapter>
<!-- TODO: how to help: run unit tests, join lists, review trac tickets -->
diff --git a/doc/guide/bind10-messages.html b/doc/guide/bind10-messages.html
index b075e96..ecebcd8 100644
--- a/doc/guide/bind10-messages.html
+++ b/doc/guide/bind10-messages.html
@@ -1,10 +1,10 @@
-<html><head><meta http-equiv="Content-Type" content="text/html; charset=ISO-8859-1"><title>BIND 10 Messages Manual</title><link rel="stylesheet" href="./bind10-guide.css" type="text/css"><meta name="generator" content="DocBook XSL Stylesheets V1.75.2"><meta name="description" content="BIND 10 is a Domain Name System (DNS) suite managed by Internet Systems Consortium (ISC). It includes DNS libraries and modular components for controlling authoritative and recursive DNS servers. This is the messages manual for BIND 10 version 20110519. The most up-to-date version of this document, along with other documents for BIND 10, can be found at ."></head><body bgcolor="white" text="black" link="#0000FF" vlink="#840084" alink="#0000FF"><div class="book" title="BIND 10 Messages Manual"><div class="titlepage"><div><div><h1 class="title"><a name="id1168230298903"></a>BIND 10 Messages Manual</h1></div><div><p class="releaseinfo">This is the messages manual for BIND 10 version
- 20110519.</p></div><div><p class="copyright">Copyright © 2011 Internet Systems Consortium, Inc.</p></div><div><div class="abstract" title="Abstract"><p class="title"><b>Abstract</b></p><p>BIND 10 is a Domain Name System (DNS) suite managed by
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=ISO-8859-1"><title>BIND 10 Messages Manual</title><link rel="stylesheet" href="./bind10-guide.css" type="text/css"><meta name="generator" content="DocBook XSL Stylesheets V1.75.2"><meta name="description" content="BIND 10 is a Domain Name System (DNS) suite managed by Internet Systems Consortium (ISC). It includes DNS libraries and modular components for controlling authoritative and recursive DNS servers. This is the messages manual for BIND 10 version 20110705. The most up-to-date version of this document, along with other documents for BIND 10, can be found at ."></head><body bgcolor="white" text="black" link="#0000FF" vlink="#840084" alink="#0000FF"><div class="book" title="BIND 10 Messages Manual"><div class="titlepage"><div><div><h1 class="title"><a name="id1168230298906"></a>BIND 10 Messages Manual</h1></div><div><p class="releaseinfo">This is the messages manual for BIND 10 version
+ 20110705.</p></div><div><p class="copyright">Copyright © 2011 Internet Systems Consortium, Inc.</p></div><div><div class="abstract" title="Abstract"><p class="title"><b>Abstract</b></p><p>BIND 10 is a Domain Name System (DNS) suite managed by
Internet Systems Consortium (ISC). It includes DNS libraries
and modular components for controlling authoritative and
recursive DNS servers.
</p><p>
- This is the messages manual for BIND 10 version 20110519.
+ This is the messages manual for BIND 10 version 20110705.
The most up-to-date version of this document, along with
other documents for BIND 10, can be found at
<a class="ulink" href="http://bind10.isc.org/docs" target="_top">http://bind10.isc.org/docs</a>.
@@ -26,38 +26,337 @@
For information on configuring and using BIND 10 logging,
refer to the <a class="ulink" href="bind10-guide.html" target="_top">BIND 10 Guide</a>.
</p></div><div class="chapter" title="Chapter 2. BIND 10 Messages"><div class="titlepage"><div><div><h2 class="title"><a name="messages"></a>Chapter 2. BIND 10 Messages</h2></div></div></div><p>
- </p><div class="variablelist"><dl><dt><a name="ASIODNS_FETCHCOMP"></a><span class="term">ASIODNS_FETCHCOMP upstream fetch to %1(%2) has now completed</span></dt><dd><p>
-A debug message, this records the the upstream fetch (a query made by the
+ </p><div class="variablelist"><dl><dt><a name="ASIODNS_FETCH_COMPLETED"></a><span class="term">ASIODNS_FETCH_COMPLETED upstream fetch to %1(%2) has now completed</span></dt><dd><p>
+A debug message, this records that the upstream fetch (a query made by the
resolver on behalf of its client) to the specified address has completed.
-</p></dd><dt><a name="ASIODNS_FETCHSTOP"></a><span class="term">ASIODNS_FETCHSTOP upstream fetch to %1(%2) has been stopped</span></dt><dd><p>
+</p></dd><dt><a name="ASIODNS_FETCH_STOPPED"></a><span class="term">ASIODNS_FETCH_STOPPED upstream fetch to %1(%2) has been stopped</span></dt><dd><p>
An external component has requested the halting of an upstream fetch. This
is an allowed operation, and the message should only appear if debug is
enabled.
-</p></dd><dt><a name="ASIODNS_OPENSOCK"></a><span class="term">ASIODNS_OPENSOCK error %1 opening %2 socket to %3(%4)</span></dt><dd><p>
+</p></dd><dt><a name="ASIODNS_OPEN_SOCKET"></a><span class="term">ASIODNS_OPEN_SOCKET error %1 opening %2 socket to %3(%4)</span></dt><dd><p>
The asynchronous I/O code encountered an error when trying to open a socket
of the specified protocol in order to send a message to the target address.
-The the number of the system error that cause the problem is given in the
+The number of the system error that caused the problem is given in the
message.
-</p></dd><dt><a name="ASIODNS_RECVSOCK"></a><span class="term">ASIODNS_RECVSOCK error %1 reading %2 data from %3(%4)</span></dt><dd><p>
-The asynchronous I/O code encountered an error when trying read data from
-the specified address on the given protocol. The the number of the system
+</p></dd><dt><a name="ASIODNS_READ_DATA"></a><span class="term">ASIODNS_READ_DATA error %1 reading %2 data from %3(%4)</span></dt><dd><p>
+The asynchronous I/O code encountered an error when trying to read data from
+the specified address on the given protocol. The number of the system
error that caused the problem is given in the message.
-</p></dd><dt><a name="ASIODNS_RECVTMO"></a><span class="term">ASIODNS_RECVTMO receive timeout while waiting for data from %1(%2)</span></dt><dd><p>
+</p></dd><dt><a name="ASIODNS_READ_TIMEOUT"></a><span class="term">ASIODNS_READ_TIMEOUT receive timeout while waiting for data from %1(%2)</span></dt><dd><p>
An upstream fetch from the specified address timed out. This may happen for
any number of reasons and is most probably a problem at the remote server
or a problem on the network. The message will only appear if debug is
enabled.
-</p></dd><dt><a name="ASIODNS_SENDSOCK"></a><span class="term">ASIODNS_SENDSOCK error %1 sending data using %2 to %3(%4)</span></dt><dd><p>
+</p></dd><dt><a name="ASIODNS_SEND_DATA"></a><span class="term">ASIODNS_SEND_DATA error %1 sending data using %2 to %3(%4)</span></dt><dd><p>
The asynchronous I/O code encountered an error when trying to send data to
the specified address on the given protocol. The number of the system
error that caused the problem is given in the message.
-</p></dd><dt><a name="ASIODNS_UNKORIGIN"></a><span class="term">ASIODNS_UNKORIGIN unknown origin for ASIO error code %1 (protocol: %2, address %3)</span></dt><dd><p>
-This message should not appear and indicates an internal error if it does.
-Please enter a bug report.
-</p></dd><dt><a name="ASIODNS_UNKRESULT"></a><span class="term">ASIODNS_UNKRESULT unknown result (%1) when IOFetch::stop() was executed for I/O to %2(%3)</span></dt><dd><p>
-The termination method of the resolver's upstream fetch class was called with
-an unknown result code (which is given in the message). This message should
-not appear and may indicate an internal error. Please enter a bug report.
+</p></dd><dt><a name="ASIODNS_UNKNOWN_ORIGIN"></a><span class="term">ASIODNS_UNKNOWN_ORIGIN unknown origin for ASIO error code %1 (protocol: %2, address %3)</span></dt><dd><p>
+An internal consistency check on the origin of a message from the
+asynchronous I/O module failed. This may indicate an internal error;
+please submit a bug report.
+</p></dd><dt><a name="ASIODNS_UNKNOWN_RESULT"></a><span class="term">ASIODNS_UNKNOWN_RESULT unknown result (%1) when IOFetch::stop() was executed for I/O to %2(%3)</span></dt><dd><p>
+An internal error indicating that the termination method of the resolver's
+upstream fetch class was called with an unknown result code (which is
+given in the message). Please submit a bug report.
+</p></dd><dt><a name="AUTH_AXFR_ERROR"></a><span class="term">AUTH_AXFR_ERROR error handling AXFR request: %1</span></dt><dd><p>
+This is a debug message produced by the authoritative server when it
+has encountered an error processing an AXFR request. The message gives
+the reason for the error, and the server will return a SERVFAIL code to
+the sender.
+</p></dd><dt><a name="AUTH_AXFR_UDP"></a><span class="term">AUTH_AXFR_UDP AXFR query received over UDP</span></dt><dd><p>
+This is a debug message output when the authoritative server has received
+an AXFR query over UDP. Use of UDP for AXFRs is not permitted by the
+protocol, so the server will return a FORMERR error to the sender.
+</p></dd><dt><a name="AUTH_COMMAND_FAILED"></a><span class="term">AUTH_COMMAND_FAILED execution of command channel instruction '%1' failed: %2</span></dt><dd><p>
+Execution of the specified command by the authoritative server failed. The
+message contains the reason for the failure.
+</p></dd><dt><a name="AUTH_CONFIG_CHANNEL_CREATED"></a><span class="term">AUTH_CONFIG_CHANNEL_CREATED configuration session channel created</span></dt><dd><p>
+This is a debug message indicating that the authoritative server has created
+the channel to the configuration manager. It is issued during server
+startup as an indication that the initialization is proceeding normally.
+</p></dd><dt><a name="AUTH_CONFIG_CHANNEL_ESTABLISHED"></a><span class="term">AUTH_CONFIG_CHANNEL_ESTABLISHED configuration session channel established</span></dt><dd><p>
+This is a debug message indicating that the authoritative server
+has established communication with the configuration manager over the
+previously-created channel. It is issued during server startup as an
+indication that the initialization is proceeding normally.
+</p></dd><dt><a name="AUTH_CONFIG_CHANNEL_STARTED"></a><span class="term">AUTH_CONFIG_CHANNEL_STARTED configuration session channel started</span></dt><dd><p>
+This is a debug message, issued when the authoritative server has
+posted a request to be notified when new configuration information is
+available. It is issued during server startup as an indication that
+the initialization is proceeding normally.
+</p></dd><dt><a name="AUTH_CONFIG_LOAD_FAIL"></a><span class="term">AUTH_CONFIG_LOAD_FAIL load of configuration failed: %1</span></dt><dd><p>
+An attempt to configure the server with information from the configuration
+database during the startup sequence has failed. (The reason for
+the failure is given in the message.) The server will continue its
+initialization although it may not be configured in the desired way.
+</p></dd><dt><a name="AUTH_CONFIG_UPDATE_FAIL"></a><span class="term">AUTH_CONFIG_UPDATE_FAIL update of configuration failed: %1</span></dt><dd><p>
+An attempt to update the configuration of the server with information
+from the configuration database has failed, the reason being given in
+the message.
+</p></dd><dt><a name="AUTH_DATA_SOURCE"></a><span class="term">AUTH_DATA_SOURCE data source database file: %1</span></dt><dd><p>
+This is a debug message produced by the authoritative server when it accesses a
+database data source, listing the file that is being accessed.
+</p></dd><dt><a name="AUTH_DNS_SERVICES_CREATED"></a><span class="term">AUTH_DNS_SERVICES_CREATED DNS services created</span></dt><dd><p>
+This is a debug message indicating that the component that will be handling
+incoming queries for the authoritative server (DNSServices) has been
+successfully created. It is issued during server startup as an indication
+that the initialization is proceeding normally.
+</p></dd><dt><a name="AUTH_HEADER_PARSE_FAIL"></a><span class="term">AUTH_HEADER_PARSE_FAIL unable to parse header in received DNS packet: %1</span></dt><dd><p>
+This is a debug message, generated by the authoritative server when an
+attempt to parse the header of a received DNS packet has failed. (The
+reason for the failure is given in the message.) The server will drop the
+packet.
+</p></dd><dt><a name="AUTH_LOAD_TSIG"></a><span class="term">AUTH_LOAD_TSIG loading TSIG keys</span></dt><dd><p>
+This is a debug message indicating that the authoritative server
+has requested the keyring holding TSIG keys from the configuration
+database. It is issued during server startup as an indication that the
+initialization is proceeding normally.
+</p></dd><dt><a name="AUTH_LOAD_ZONE"></a><span class="term">AUTH_LOAD_ZONE loaded zone %1/%2</span></dt><dd><p>
+This debug message is issued during the processing of the 'loadzone' command
+when the authoritative server has successfully loaded the named zone of the
+named class.
+</p></dd><dt><a name="AUTH_MEM_DATASRC_DISABLED"></a><span class="term">AUTH_MEM_DATASRC_DISABLED memory data source is disabled for class %1</span></dt><dd><p>
+This is a debug message reporting that the authoritative server has
+discovered that the memory data source is disabled for the given class.
+</p></dd><dt><a name="AUTH_MEM_DATASRC_ENABLED"></a><span class="term">AUTH_MEM_DATASRC_ENABLED memory data source is enabled for class %1</span></dt><dd><p>
+This is a debug message reporting that the authoritative server has
+discovered that the memory data source is enabled for the given class.
+</p></dd><dt><a name="AUTH_NOTIFY_QUESTIONS"></a><span class="term">AUTH_NOTIFY_QUESTIONS invalid number of questions (%1) in incoming NOTIFY</span></dt><dd><p>
+This debug message is logged by the authoritative server when it receives
+a NOTIFY packet that contains zero or more than one question. (A valid
+NOTIFY packet contains one question.) The server will return a FORMERR
+error to the sender.
+</p></dd><dt><a name="AUTH_NOTIFY_RRTYPE"></a><span class="term">AUTH_NOTIFY_RRTYPE invalid question RR type (%1) in incoming NOTIFY</span></dt><dd><p>
+This debug message is logged by the authoritative server when it receives
+a NOTIFY packet that contains an RR type of something other than SOA in the
+question section. (The RR type received is included in the message.) The
+server will return a FORMERR error to the sender.
+</p></dd><dt><a name="AUTH_NO_STATS_SESSION"></a><span class="term">AUTH_NO_STATS_SESSION session interface for statistics is not available</span></dt><dd><p>
+The authoritative server had no session with the statistics module at the
+time it attempted to send it data: the attempt has been abandoned. This
+could be an error in configuration.
+</p></dd><dt><a name="AUTH_NO_XFRIN"></a><span class="term">AUTH_NO_XFRIN received NOTIFY but XFRIN session is not running</span></dt><dd><p>
+This is a debug message produced by the authoritative server when it receives
+a NOTIFY packet but the XFRIN process is not running. The packet will be
+dropped and nothing returned to the sender.
+</p></dd><dt><a name="AUTH_PACKET_PARSE_ERROR"></a><span class="term">AUTH_PACKET_PARSE_ERROR unable to parse received DNS packet: %1</span></dt><dd><p>
+This is a debug message, generated by the authoritative server when an
+attempt to parse a received DNS packet has failed due to something other
+than a protocol error. The reason for the failure is given in the message;
+the server will return a SERVFAIL error code to the sender.
+</p></dd><dt><a name="AUTH_PACKET_PROTOCOL_ERROR"></a><span class="term">AUTH_PACKET_PROTOCOL_ERROR DNS packet protocol error: %1. Returning %2</span></dt><dd><p>
+This is a debug message, generated by the authoritative server when an
+attempt to parse a received DNS packet has failed due to a protocol error.
+The reason for the failure is given in the message, as is the error code
+that will be returned to the sender.
+</p></dd><dt><a name="AUTH_PACKET_RECEIVED"></a><span class="term">AUTH_PACKET_RECEIVED message received:\n%1</span></dt><dd><p>
+This is a debug message output by the authoritative server when it
+receives a valid DNS packet.
+</p><p>
+Note: This message includes the packet received, rendered in the form of
+multiple lines of text. For this reason, it is suggested that this log message
+not be routed to the syslog file, where the multiple lines could confuse
+programs that expect a format of one message per line.
+</p></dd><dt><a name="AUTH_PROCESS_FAIL"></a><span class="term">AUTH_PROCESS_FAIL message processing failure: %1</span></dt><dd><p>
+This message is generated by the authoritative server when it has
+encountered an internal error whilst processing a received packet:
+the cause of the error is included in the message.
+</p><p>
+The server will return a SERVFAIL error code to the sender of the packet.
+However, this message indicates a potential error in the server.
+Please open a bug ticket for this issue.
+</p></dd><dt><a name="AUTH_RECEIVED_COMMAND"></a><span class="term">AUTH_RECEIVED_COMMAND command '%1' received</span></dt><dd><p>
+This is a debug message issued when the authoritative server has received
+a command on the command channel.
+</p></dd><dt><a name="AUTH_RECEIVED_SENDSTATS"></a><span class="term">AUTH_RECEIVED_SENDSTATS command 'sendstats' received</span></dt><dd><p>
+This is a debug message issued when the authoritative server has received
+a command from the statistics module to send it data. The 'sendstats'
+command is handled differently to other commands, which is why the debug
+message associated with it has its own code.
+</p></dd><dt><a name="AUTH_RESPONSE_RECEIVED"></a><span class="term">AUTH_RESPONSE_RECEIVED received response message, ignoring</span></dt><dd><p>
+This is a debug message which is output if the authoritative server
+receives a DNS packet with the QR bit set, i.e. a DNS response. The
+server ignores the packet as it only responds to question packets.
+</p></dd><dt><a name="AUTH_SEND_ERROR_RESPONSE"></a><span class="term">AUTH_SEND_ERROR_RESPONSE sending an error response (%1 bytes):\n%2</span></dt><dd><p>
+This is a debug message recording that the authoritative server is sending
+an error response to the originator of the query. A previous message will
+have recorded details of the failure.
+</p><p>
+Note: This message includes the packet sent, rendered in the form of
+multiple lines of text. For this reason, it is suggested that this log message
+not be routed to the syslog file, where the multiple lines could confuse
+programs that expect a format of one message per line.
+</p></dd><dt><a name="AUTH_SEND_NORMAL_RESPONSE"></a><span class="term">AUTH_SEND_NORMAL_RESPONSE sending an error response (%1 bytes):\n%2</span></dt><dd><p>
+This is a debug message recording that the authoritative server is sending
+a response to the originator of a query.
+</p><p>
+Note: This message includes the packet sent, rendered in the form of
+multiple lines of text. For this reason, it is suggested that this log message
+not be routed to the syslog file, where the multiple lines could confuse
+programs that expect a format of one message per line.
+</p></dd><dt><a name="AUTH_SERVER_CREATED"></a><span class="term">AUTH_SERVER_CREATED server created</span></dt><dd><p>
+An informational message indicating that the authoritative server process has
+been created and is initializing. The AUTH_SERVER_STARTED message will be
+output when initialization has successfully completed and the server starts
+accepting queries.
+</p></dd><dt><a name="AUTH_SERVER_FAILED"></a><span class="term">AUTH_SERVER_FAILED server failed: %1</span></dt><dd><p>
+The authoritative server has encountered a fatal error and is terminating. The
+reason for the failure is included in the message.
+</p></dd><dt><a name="AUTH_SERVER_STARTED"></a><span class="term">AUTH_SERVER_STARTED server started</span></dt><dd><p>
+Initialization of the authoritative server has completed successfully
+and it is entering the main loop, waiting for queries to arrive.
+</p></dd><dt><a name="AUTH_SQLITE3"></a><span class="term">AUTH_SQLITE3 nothing to do for loading sqlite3</span></dt><dd><p>
+This is a debug message indicating that the authoritative server has
+found that the data source it is loading is an SQLite3 data source,
+so no further validation is needed.
+</p></dd><dt><a name="AUTH_STATS_CHANNEL_CREATED"></a><span class="term">AUTH_STATS_CHANNEL_CREATED STATS session channel created</span></dt><dd><p>
+This is a debug message indicating that the authoritative server has
+created a channel to the statistics process. It is issued during server
+startup as an indication that the initialization is proceeding normally.
+</p></dd><dt><a name="AUTH_STATS_CHANNEL_ESTABLISHED"></a><span class="term">AUTH_STATS_CHANNEL_ESTABLISHED STATS session channel established</span></dt><dd><p>
+This is a debug message indicating that the authoritative server
+has established communication over the previously created statistics
+channel. It is issued during server startup as an indication that the
+initialization is proceeding normally.
+</p></dd><dt><a name="AUTH_STATS_COMMS"></a><span class="term">AUTH_STATS_COMMS communication error in sending statistics data: %1</span></dt><dd><p>
+An error was encountered when the authoritative server tried to send data
+to the statistics daemon. The message includes additional information
+describing the reason for the failure.
+</p></dd><dt><a name="AUTH_STATS_TIMEOUT"></a><span class="term">AUTH_STATS_TIMEOUT timeout while sending statistics data: %1</span></dt><dd><p>
+The authoritative server sent data to the statistics daemon but received
+no acknowledgement within the specified time. The message includes
+additional information describing the reason for the failure.
+</p></dd><dt><a name="AUTH_STATS_TIMER_DISABLED"></a><span class="term">AUTH_STATS_TIMER_DISABLED statistics timer has been disabled</span></dt><dd><p>
+This is a debug message indicating that the statistics timer has been
+disabled in the authoritative server and no statistics information is
+being produced.
+</p></dd><dt><a name="AUTH_STATS_TIMER_SET"></a><span class="term">AUTH_STATS_TIMER_SET statistics timer set to %1 second(s)</span></dt><dd><p>
+This is a debug message indicating that the statistics timer has been
+enabled and that the authoritative server will produce statistics data
+at the specified interval.
+</p></dd><dt><a name="AUTH_UNSUPPORTED_OPCODE"></a><span class="term">AUTH_UNSUPPORTED_OPCODE unsupported opcode: %1</span></dt><dd><p>
+This is a debug message, produced when a received DNS packet being
+processed by the authoritative server has been found to contain an
+unsupported opcode. (The opcode is included in the message.) The server
+will return an error code of NOTIMPL to the sender.
+</p></dd><dt><a name="AUTH_XFRIN_CHANNEL_CREATED"></a><span class="term">AUTH_XFRIN_CHANNEL_CREATED XFRIN session channel created</span></dt><dd><p>
+This is a debug message indicating that the authoritative server has
+created a channel to the XFRIN (Transfer-in) process. It is issued
+during server startup as an indication that the initialization is
+proceeding normally.
+</p></dd><dt><a name="AUTH_XFRIN_CHANNEL_ESTABLISHED"></a><span class="term">AUTH_XFRIN_CHANNEL_ESTABLISHED XFRIN session channel established</span></dt><dd><p>
+This is a debug message indicating that the authoritative server has
+established communication over the previously-created channel to the
+XFRIN (Transfer-in) process. It is issued during server startup as an
+indication that the initialization is proceeding normally.
+</p></dd><dt><a name="AUTH_ZONEMGR_COMMS"></a><span class="term">AUTH_ZONEMGR_COMMS error communicating with zone manager: %1</span></dt><dd><p>
+This is a debug message output during the processing of a NOTIFY request.
+An error (listed in the message) has been encountered whilst communicating
+with the zone manager. The NOTIFY request will not be honored.
+</p></dd><dt><a name="AUTH_ZONEMGR_ERROR"></a><span class="term">AUTH_ZONEMGR_ERROR received error response from zone manager: %1</span></dt><dd><p>
+This is a debug message output during the processing of a NOTIFY
+request. The zone manager component has been informed of the request,
+but has returned an error response (which is included in the message). The
+NOTIFY request will not be honored.
+</p></dd><dt><a name="CC_ASYNC_READ_FAILED"></a><span class="term">CC_ASYNC_READ_FAILED asynchronous read failed</span></dt><dd><p>
+This marks a low-level error: an attempt was made to read data from the
+message queue daemon asynchronously, but the ASIO library returned an error.
+</p></dd><dt><a name="CC_CONN_ERROR"></a><span class="term">CC_CONN_ERROR error connecting to message queue (%1)</span></dt><dd><p>
+It is impossible to reach the message queue daemon for the reason given. It
+is unlikely that the program reporting this error will be able to continue
+running usefully, as communication with the rest of BIND 10 is vital
+for its operation.
+</p></dd><dt><a name="CC_DISCONNECT"></a><span class="term">CC_DISCONNECT disconnecting from message queue daemon</span></dt><dd><p>
+The library is disconnecting from the message queue daemon. This debug message
+indicates that the program is trying to shut down gracefully.
+</p></dd><dt><a name="CC_ESTABLISH"></a><span class="term">CC_ESTABLISH trying to establish connection with message queue daemon at %1</span></dt><dd><p>
+This debug message indicates that the command channel library is about to
+connect to the message queue daemon, which should be listening on the UNIX-domain
+socket listed in the output.
+</p></dd><dt><a name="CC_ESTABLISHED"></a><span class="term">CC_ESTABLISHED successfully connected to message queue daemon</span></dt><dd><p>
+This debug message indicates that the connection was successfully made; it
+should follow CC_ESTABLISH.
+</p></dd><dt><a name="CC_GROUP_RECEIVE"></a><span class="term">CC_GROUP_RECEIVE trying to receive a message</span></dt><dd><p>
+Debug message, noting that a message is expected to come over the command
+channel.
+</p></dd><dt><a name="CC_GROUP_RECEIVED"></a><span class="term">CC_GROUP_RECEIVED message arrived ('%1', '%2')</span></dt><dd><p>
+Debug message, noting that we successfully received a message (its envelope and
+payload listed). This follows CC_GROUP_RECEIVE, but might happen some time
+later, depending on whether we waited for it or just polled.
+</p></dd><dt><a name="CC_GROUP_SEND"></a><span class="term">CC_GROUP_SEND sending message '%1' to group '%2'</span></dt><dd><p>
+Debug message, we're about to send a message over the command channel.
+</p></dd><dt><a name="CC_INVALID_LENGTHS"></a><span class="term">CC_INVALID_LENGTHS invalid length parameters (%1, %2)</span></dt><dd><p>
+This happens when garbage comes over the command channel or some kind of
+confusion happens in the program. The data received from the socket make no
+sense if interpreted as message lengths. The first value is the total length
+of the message, the second the length of the header. The header and its length
+(2 bytes) are counted in the total length.
+</p></dd><dt><a name="CC_LENGTH_NOT_READY"></a><span class="term">CC_LENGTH_NOT_READY length not ready</span></dt><dd><p>
+There should be data representing length of message on the socket, but it
+is not there.
+</p></dd><dt><a name="CC_NO_MESSAGE"></a><span class="term">CC_NO_MESSAGE no message ready to be received yet</span></dt><dd><p>
+The program polled for incoming messages, but there was no message waiting.
+This is a debug message which may happen only after CC_GROUP_RECEIVE.
+</p></dd><dt><a name="CC_NO_MSGQ"></a><span class="term">CC_NO_MSGQ unable to connect to message queue (%1)</span></dt><dd><p>
+It isn't possible to connect to the message queue daemon, for the reason listed.
+It is unlikely any program will be able to continue without this communication.
+</p></dd><dt><a name="CC_READ_ERROR"></a><span class="term">CC_READ_ERROR error reading data from command channel (%1)</span></dt><dd><p>
+A low level error happened when the library tried to read data from the
+command channel socket. The reason is listed.
+</p></dd><dt><a name="CC_READ_EXCEPTION"></a><span class="term">CC_READ_EXCEPTION error reading data from command channel (%1)</span></dt><dd><p>
+We received an exception while trying to read data from the command
+channel socket. The reason is listed.
+</p></dd><dt><a name="CC_REPLY"></a><span class="term">CC_REPLY replying to message from '%1' with '%2'</span></dt><dd><p>
+Debug message, noting we're sending a response to the original message
+with the given envelope.
+</p></dd><dt><a name="CC_SET_TIMEOUT"></a><span class="term">CC_SET_TIMEOUT setting timeout to %1ms</span></dt><dd><p>
+Debug message. A timeout for which the program is willing to wait for a reply
+is being set.
+</p></dd><dt><a name="CC_START_READ"></a><span class="term">CC_START_READ starting asynchronous read</span></dt><dd><p>
+Debug message. From now on, when a message (or command) comes, it'll wake the
+program and the library will automatically pass it over to the correct place.
+</p></dd><dt><a name="CC_SUBSCRIBE"></a><span class="term">CC_SUBSCRIBE subscribing to communication group %1</span></dt><dd><p>
+Debug message. The program wants to receive messages addressed to this group.
+</p></dd><dt><a name="CC_TIMEOUT"></a><span class="term">CC_TIMEOUT timeout reading data from command channel</span></dt><dd><p>
+The program waited too long for data from the command channel (usually when it
+sent a query to a different program and it didn't answer for whatever reason).
+</p></dd><dt><a name="CC_UNSUBSCRIBE"></a><span class="term">CC_UNSUBSCRIBE unsubscribing from communication group %1</span></dt><dd><p>
+Debug message. The program no longer wants to receive messages addressed to
+this group.
+</p></dd><dt><a name="CC_WRITE_ERROR"></a><span class="term">CC_WRITE_ERROR error writing data to command channel (%1)</span></dt><dd><p>
+A low level error happened when the library tried to write data to the command
+channel socket.
+</p></dd><dt><a name="CC_ZERO_LENGTH"></a><span class="term">CC_ZERO_LENGTH invalid message length (0)</span></dt><dd><p>
+The library received a message length of zero, which makes no sense, since
+all messages must contain at least the envelope.
+</p></dd><dt><a name="CFGMGR_AUTOMATIC_CONFIG_DATABASE_UPDATE"></a><span class="term">CFGMGR_AUTOMATIC_CONFIG_DATABASE_UPDATE Updating configuration database from version %1 to %2</span></dt><dd><p>
+An older version of the configuration database has been found, from which
+there was an automatic upgrade path to the current version. These changes
+are now applied, and no action from the administrator is necessary.
+</p></dd><dt><a name="CFGMGR_CC_SESSION_ERROR"></a><span class="term">CFGMGR_CC_SESSION_ERROR Error connecting to command channel: %1</span></dt><dd><p>
+The configuration manager daemon was unable to connect to the messaging
+system. The most likely cause is that msgq is not running.
+</p></dd><dt><a name="CFGMGR_DATA_READ_ERROR"></a><span class="term">CFGMGR_DATA_READ_ERROR error reading configuration database from disk: %1</span></dt><dd><p>
+There was a problem reading the persistent configuration data as stored
+on disk. The file may be corrupted, or it is of a version from where
+there is no automatic upgrade path. The file needs to be repaired or
+removed. The configuration manager daemon will now shut down.
+</p></dd><dt><a name="CFGMGR_IOERROR_WHILE_WRITING_CONFIGURATION"></a><span class="term">CFGMGR_IOERROR_WHILE_WRITING_CONFIGURATION Unable to write configuration file; configuration not stored: %1</span></dt><dd><p>
+There was an IO error from the system while the configuration manager
+was trying to write the configuration database to disk. The specific
+error is given. The most likely cause is that the directory where
+the file is stored does not exist, or is not writable. The updated
+configuration is not stored.
+</p></dd><dt><a name="CFGMGR_OSERROR_WHILE_WRITING_CONFIGURATION"></a><span class="term">CFGMGR_OSERROR_WHILE_WRITING_CONFIGURATION Unable to write configuration file; configuration not stored: %1</span></dt><dd><p>
+There was an OS error from the system while the configuration manager
+was trying to write the configuration database to disk. The specific
+error is given. The most likely cause is that the system does not have
+write access to the configuration database file. The updated
+configuration is not stored.
+</p></dd><dt><a name="CFGMGR_STOPPED_BY_KEYBOARD"></a><span class="term">CFGMGR_STOPPED_BY_KEYBOARD keyboard interrupt, shutting down</span></dt><dd><p>
+There was a keyboard interrupt signal to stop the cfgmgr daemon. The
+daemon will now shut down.
</p></dd><dt><a name="CONFIG_CCSESSION_MSG"></a><span class="term">CONFIG_CCSESSION_MSG error in CC session message: %1</span></dt><dd><p>
There was a problem with an incoming message on the command and control
channel. The message does not appear to be a valid command, and is
@@ -65,33 +364,36 @@ missing a required element or contains an unknown data format. This
most likely means that another BIND10 module is sending a bad message.
The message itself is ignored by this module.
</p></dd><dt><a name="CONFIG_CCSESSION_MSG_INTERNAL"></a><span class="term">CONFIG_CCSESSION_MSG_INTERNAL error handling CC session message: %1</span></dt><dd><p>
-There was an internal problem handling an incoming message on the
-command and control channel. An unexpected exception was thrown. This
-most likely points to an internal inconsistency in the module code. The
-exception message is appended to the log error, and the module will
-continue to run, but will not send back an answer.
-</p></dd><dt><a name="CONFIG_FOPEN_ERR"></a><span class="term">CONFIG_FOPEN_ERR error opening %1: %2</span></dt><dd><p>
-There was an error opening the given file.
-</p></dd><dt><a name="CONFIG_JSON_PARSE"></a><span class="term">CONFIG_JSON_PARSE JSON parse error in %1: %2</span></dt><dd><p>
-There was a parse error in the JSON file. The given file does not appear
-to be in valid JSON format. Please verify that the filename is correct
-and that the contents are valid JSON.
-</p></dd><dt><a name="CONFIG_MANAGER_CONFIG"></a><span class="term">CONFIG_MANAGER_CONFIG error getting configuration from cfgmgr: %1</span></dt><dd><p>
+There was an internal problem handling an incoming message on the command
+and control channel. An unexpected exception was thrown, details of
+which are appended to the message. The module will continue to run,
+but will not send back an answer.
+</p><p>
+The most likely cause of this error is a programming error. Please raise
+a bug report.
+</p></dd><dt><a name="CONFIG_GET_FAIL"></a><span class="term">CONFIG_GET_FAIL error getting configuration from cfgmgr: %1</span></dt><dd><p>
The configuration manager returned an error when this module requested
the configuration. The full error message answer from the configuration
manager is appended to the log error. The most likely cause is that
the module is of a different (command specification) version than the
running configuration manager.
-</p></dd><dt><a name="CONFIG_MANAGER_MOD_SPEC"></a><span class="term">CONFIG_MANAGER_MOD_SPEC module specification not accepted by cfgmgr: %1</span></dt><dd><p>
-The module specification file for this module was rejected by the
-configuration manager. The full error message answer from the
-configuration manager is appended to the log error. The most likely
-cause is that the module is of a different (specification file) version
-than the running configuration manager.
-</p></dd><dt><a name="CONFIG_MODULE_SPEC"></a><span class="term">CONFIG_MODULE_SPEC module specification error in %1: %2</span></dt><dd><p>
-The given file does not appear to be a valid specification file. Please
-verify that the filename is correct and that its contents are a valid
-BIND10 module specification.
+</p></dd><dt><a name="CONFIG_JSON_PARSE"></a><span class="term">CONFIG_JSON_PARSE JSON parse error in %1: %2</span></dt><dd><p>
+There was an error parsing the JSON file. The given file does not appear
+to be in valid JSON format. Please verify that the filename is correct
+and that the contents are valid JSON.
+</p></dd><dt><a name="CONFIG_MOD_SPEC_FORMAT"></a><span class="term">CONFIG_MOD_SPEC_FORMAT module specification error in %1: %2</span></dt><dd><p>
+The given file does not appear to be a valid specification file: details
+are included in the message. Please verify that the filename is correct
+and that its contents are a valid BIND10 module specification.
+</p></dd><dt><a name="CONFIG_MOD_SPEC_REJECT"></a><span class="term">CONFIG_MOD_SPEC_REJECT module specification rejected by cfgmgr: %1</span></dt><dd><p>
+The specification file for this module was rejected by the configuration
+manager. The full error message answer from the configuration manager is
+appended to the log error. The most likely cause is that the module is of
+a different (specification file) version than the running configuration
+manager.
+</p></dd><dt><a name="CONFIG_OPEN_FAIL"></a><span class="term">CONFIG_OPEN_FAIL error opening %1: %2</span></dt><dd><p>
+There was an error opening the given file. The reason for the failure
+is included in the message.
</p></dd><dt><a name="DATASRC_CACHE_CREATE"></a><span class="term">DATASRC_CACHE_CREATE creating the hotspot cache</span></dt><dd><p>
Debug information that the hotspot cache was created at startup.
</p></dd><dt><a name="DATASRC_CACHE_DESTROY"></a><span class="term">DATASRC_CACHE_DESTROY destroying the hotspot cache</span></dt><dd><p>
@@ -146,7 +448,7 @@ Debug information. The requested domain is an alias to a different domain,
returning the CNAME instead.
</p></dd><dt><a name="DATASRC_MEM_CNAME_COEXIST"></a><span class="term">DATASRC_MEM_CNAME_COEXIST can't add data to CNAME in domain '%1'</span></dt><dd><p>
This is the same problem as in MEM_CNAME_TO_NONEMPTY, but it happened the
-other way around -- adding some outher data to CNAME.
+other way around -- adding some other data to CNAME.
</p></dd><dt><a name="DATASRC_MEM_CNAME_TO_NONEMPTY"></a><span class="term">DATASRC_MEM_CNAME_TO_NONEMPTY can't add CNAME to domain with other data in '%1'</span></dt><dd><p>
Someone or something tried to add a CNAME into a domain that already contains
some other data. But the protocol forbids coexistence of CNAME with anything
@@ -164,7 +466,7 @@ encountered on the way. This may lead to redirection to a different domain and
stop the search.
</p></dd><dt><a name="DATASRC_MEM_DNAME_FOUND"></a><span class="term">DATASRC_MEM_DNAME_FOUND DNAME found at '%1'</span></dt><dd><p>
Debug information. A DNAME was found instead of the requested information.
-</p></dd><dt><a name="DATASRC_MEM_DNAME_NS"></a><span class="term">DATASRC_MEM_DNAME_NS dNAME and NS can't coexist in non-apex domain '%1'</span></dt><dd><p>
+</p></dd><dt><a name="DATASRC_MEM_DNAME_NS"></a><span class="term">DATASRC_MEM_DNAME_NS DNAME and NS can't coexist in non-apex domain '%1'</span></dt><dd><p>
It was requested for DNAME and NS records to be put into the same domain
which is not the apex (the top of the zone). This is forbidden by RFC
2672, section 3. This indicates a problem with provided data.
@@ -222,12 +524,12 @@ destroyed.
Debug information. A domain above wildcard was reached, but there's something
below the requested domain. Therefore the wildcard doesn't apply here. This
behaviour is specified by RFC 1034, section 4.3.3
-</p></dd><dt><a name="DATASRC_MEM_WILDCARD_DNAME"></a><span class="term">DATASRC_MEM_WILDCARD_DNAME dNAME record in wildcard domain '%1'</span></dt><dd><p>
+</p></dd><dt><a name="DATASRC_MEM_WILDCARD_DNAME"></a><span class="term">DATASRC_MEM_WILDCARD_DNAME DNAME record in wildcard domain '%1'</span></dt><dd><p>
The software refuses to load DNAME records into a wildcard domain. It isn't
explicitly forbidden, but the protocol is ambiguous about how this should
behave and BIND 9 refuses that as well. Please describe your intention using
different tools.
-</p></dd><dt><a name="DATASRC_MEM_WILDCARD_NS"></a><span class="term">DATASRC_MEM_WILDCARD_NS nS record in wildcard domain '%1'</span></dt><dd><p>
+</p></dd><dt><a name="DATASRC_MEM_WILDCARD_NS"></a><span class="term">DATASRC_MEM_WILDCARD_NS NS record in wildcard domain '%1'</span></dt><dd><p>
The software refuses to load NS records into a wildcard domain. It isn't
explicitly forbidden, but the protocol is ambiguous about how this should
behave and BIND 9 refuses that as well. Please describe your intention using
@@ -269,7 +571,7 @@ response message.
</p></dd><dt><a name="DATASRC_QUERY_DELEGATION"></a><span class="term">DATASRC_QUERY_DELEGATION looking for delegation on the path to '%1'</span></dt><dd><p>
Debug information. The software is trying to identify delegation points on the
way down to the given domain.
-</p></dd><dt><a name="DATASRC_QUERY_EMPTY_CNAME"></a><span class="term">DATASRC_QUERY_EMPTY_CNAME cNAME at '%1' is empty</span></dt><dd><p>
+</p></dd><dt><a name="DATASRC_QUERY_EMPTY_CNAME"></a><span class="term">DATASRC_QUERY_EMPTY_CNAME CNAME at '%1' is empty</span></dt><dd><p>
There was a CNAME and it was being followed. But it contains no records,
so there's nowhere to go. There will be no answer. This indicates a problem
with supplied data.
@@ -363,7 +665,7 @@ DNAMEs will be synthesized.
</p></dd><dt><a name="DATASRC_QUERY_TASK_FAIL"></a><span class="term">DATASRC_QUERY_TASK_FAIL task failed with %1</span></dt><dd><p>
The query subtask failed. The reason should have been reported by the subtask
already. The code is 1 for error, 2 for not implemented.
-</p></dd><dt><a name="DATASRC_QUERY_TOO_MANY_CNAMES"></a><span class="term">DATASRC_QUERY_TOO_MANY_CNAMES cNAME chain limit exceeded at '%1'</span></dt><dd><p>
+</p></dd><dt><a name="DATASRC_QUERY_TOO_MANY_CNAMES"></a><span class="term">DATASRC_QUERY_TOO_MANY_CNAMES CNAME chain limit exceeded at '%1'</span></dt><dd><p>
A CNAME led to another CNAME and it led to another, and so on. After 16
CNAMEs, the software gave up. Long CNAME chains are discouraged, and this
might possibly be a loop as well. Note that some of the CNAMEs might have
@@ -385,15 +687,15 @@ While processing a wildcard, a referral was met. But it wasn't possible to get
enough information for it. The code is 1 for error, 2 for not implemented.
</p></dd><dt><a name="DATASRC_SQLITE_CLOSE"></a><span class="term">DATASRC_SQLITE_CLOSE closing SQLite database</span></dt><dd><p>
Debug information. The SQLite data source is closing the database file.
-</p></dd><dt><a name="DATASRC_SQLITE_CREATE"></a><span class="term">DATASRC_SQLITE_CREATE sQLite data source created</span></dt><dd><p>
+</p></dd><dt><a name="DATASRC_SQLITE_CREATE"></a><span class="term">DATASRC_SQLITE_CREATE SQLite data source created</span></dt><dd><p>
Debug information. An instance of SQLite data source is being created.
-</p></dd><dt><a name="DATASRC_SQLITE_DESTROY"></a><span class="term">DATASRC_SQLITE_DESTROY sQLite data source destroyed</span></dt><dd><p>
+</p></dd><dt><a name="DATASRC_SQLITE_DESTROY"></a><span class="term">DATASRC_SQLITE_DESTROY SQLite data source destroyed</span></dt><dd><p>
Debug information. An instance of SQLite data source is being destroyed.
</p></dd><dt><a name="DATASRC_SQLITE_ENCLOSURE"></a><span class="term">DATASRC_SQLITE_ENCLOSURE looking for zone containing '%1'</span></dt><dd><p>
-Debug information. The SQLite data source is trying to identify, which zone
+Debug information. The SQLite data source is trying to identify which zone
should hold this domain.
</p></dd><dt><a name="DATASRC_SQLITE_ENCLOSURE_NOTFOUND"></a><span class="term">DATASRC_SQLITE_ENCLOSURE_NOTFOUND no zone contains it</span></dt><dd><p>
-Debug information. The last SQLITE_ENCLOSURE query was unsuccessful, there's
+Debug information. The last SQLITE_ENCLOSURE query was unsuccessful; there's
no such zone in our data.
</p></dd><dt><a name="DATASRC_SQLITE_FIND"></a><span class="term">DATASRC_SQLITE_FIND looking for RRset '%1/%2'</span></dt><dd><p>
Debug information. The SQLite data source is looking up a resource record
@@ -417,7 +719,7 @@ and type in the database.
Debug information. The SQLite data source is identifying if this domain is
a referral and where it goes.
</p></dd><dt><a name="DATASRC_SQLITE_FINDREF_BAD_CLASS"></a><span class="term">DATASRC_SQLITE_FINDREF_BAD_CLASS class mismatch looking for referral ('%1' and '%2')</span></dt><dd><p>
-The SQLite data source was trying to identify, if there's a referral. But
+The SQLite data source was trying to identify if there's a referral. But
it contains a different class than the query was for.
</p></dd><dt><a name="DATASRC_SQLITE_FIND_BAD_CLASS"></a><span class="term">DATASRC_SQLITE_FIND_BAD_CLASS class mismatch looking for an RRset ('%1' and '%2')</span></dt><dd><p>
The SQLite data source was looking up an RRset, but the data source contains
@@ -452,142 +754,173 @@ data source.
</p></dd><dt><a name="DATASRC_UNEXPECTED_QUERY_STATE"></a><span class="term">DATASRC_UNEXPECTED_QUERY_STATE unexpected query state</span></dt><dd><p>
This indicates a programming error. An internal task of unknown type was
generated.
-</p></dd><dt><a name="LOGIMPL_ABOVEDBGMAX"></a><span class="term">LOGIMPL_ABOVEDBGMAX debug level of %1 is too high and will be set to the maximum of %2</span></dt><dd><p>
-A message from the underlying logger implementation code, the debug level
-(as set by the string DEBGUGn) is above the maximum allowed value and has
-been reduced to that value.
-</p></dd><dt><a name="LOGIMPL_BADDEBUG"></a><span class="term">LOGIMPL_BADDEBUG debug string is '%1': must be of the form DEBUGn</span></dt><dd><p>
-The string indicating the extended logging level (used by the underlying
-logger implementation code) is not of the stated form. In particular,
-it starts DEBUG but does not end with an integer.
-</p></dd><dt><a name="LOGIMPL_BELOWDBGMIN"></a><span class="term">LOGIMPL_BELOWDBGMIN debug level of %1 is too low and will be set to the minimum of %2</span></dt><dd><p>
-A message from the underlying logger implementation code, the debug level
-(as set by the string DEBGUGn) is below the minimum allowed value and has
-been increased to that value.
-</p></dd><dt><a name="MSG_BADDESTINATION"></a><span class="term">MSG_BADDESTINATION unrecognized log destination: %1</span></dt><dd><p>
+</p></dd><dt><a name="LOGIMPL_ABOVE_MAX_DEBUG"></a><span class="term">LOGIMPL_ABOVE_MAX_DEBUG debug level of %1 is too high and will be set to the maximum of %2</span></dt><dd><p>
+A message from the interface to the underlying logger implementation reporting
+that the debug level (as set by an internally-created string DEBUGn, where n
+is an integer, e.g. DEBUG22) is above the maximum allowed value and has
+been reduced to that value. The appearance of this message may indicate
+a programming error - please submit a bug report.
+</p></dd><dt><a name="LOGIMPL_BAD_DEBUG_STRING"></a><span class="term">LOGIMPL_BAD_DEBUG_STRING debug string '%1' has invalid format</span></dt><dd><p>
+A message from the interface to the underlying logger implementation
+reporting that an internally-created string used to set the debug level
+is not of the correct format (it should be of the form DEBUGn, where n
+is an integer, e.g. DEBUG22). The appearance of this message indicates
+a programming error - please submit a bug report.
+</p></dd><dt><a name="LOGIMPL_BELOW_MIN_DEBUG"></a><span class="term">LOGIMPL_BELOW_MIN_DEBUG debug level of %1 is too low and will be set to the minimum of %2</span></dt><dd><p>
+A message from the interface to the underlying logger implementation reporting
+that the debug level (as set by an internally-created string DEBUGn, where n
+is an integer, e.g. DEBUG22) is below the minimum allowed value and has
+been increased to that value. The appearance of this message may indicate
+a programming error - please submit a bug report.
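+</p><p>
+To make the clamping concrete, the following is a minimal sketch (not the
+BIND 10 implementation; the 0 to 99 range is an assumption) of how a DEBUGn
+string could be turned into a bounded debug level:
+</p><pre class="screen">
+#include <algorithm>
+#include <cstdlib>
+#include <string>
+
+// Illustrative only: parse "DEBUGn" (e.g. "DEBUG22") and clamp the result
+// to an assumed [0, 99] range, which is the behaviour the messages above
+// report when the value is out of bounds.
+int debugLevelFromString(const std::string& s) {
+    static const int MIN_DEBUG = 0;   // assumed minimum
+    static const int MAX_DEBUG = 99;  // assumed maximum
+    if (s.compare(0, 5, "DEBUG") != 0) {
+        return MIN_DEBUG;  // a malformed string is reported separately
+    }
+    const int level = std::atoi(s.c_str() + 5);
+    return std::max(MIN_DEBUG, std::min(MAX_DEBUG, level));
+}
+</pre><p>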
+</p></dd><dt><a name="LOG_BAD_DESTINATION"></a><span class="term">LOG_BAD_DESTINATION unrecognized log destination: %1</span></dt><dd><p>
A logger destination value was given that was not recognized. The
destination should be one of "console", "file", or "syslog".
-</p></dd><dt><a name="MSG_BADSEVERITY"></a><span class="term">MSG_BADSEVERITY unrecognized log severity: %1</span></dt><dd><p>
+</p></dd><dt><a name="LOG_BAD_SEVERITY"></a><span class="term">LOG_BAD_SEVERITY unrecognized log severity: %1</span></dt><dd><p>
A logger severity value was given that was not recognized. The severity
should be one of "DEBUG", "INFO", "WARN", "ERROR", or "FATAL".
-</p></dd><dt><a name="MSG_BADSTREAM"></a><span class="term">MSG_BADSTREAM bad log console output stream: %1</span></dt><dd><p>
-A log console output stream was given that was not recognized. The
-output stream should be one of "stdout", or "stderr"
-</p></dd><dt><a name="MSG_DUPLNS"></a><span class="term">MSG_DUPLNS line %1: duplicate $NAMESPACE directive found</span></dt><dd><p>
-When reading a message file, more than one $NAMESPACE directive was found. In
-this version of the code, such a condition is regarded as an error and the
-read will be abandoned.
-</p></dd><dt><a name="MSG_DUPMSGID"></a><span class="term">MSG_DUPMSGID duplicate message ID (%1) in compiled code</span></dt><dd><p>
-Indicative of a programming error, when it started up, BIND10 detected that
-the given message ID had been registered by one or more modules. (All message
-IDs should be unique throughout BIND10.) This has no impact on the operation
-of the server other that erroneous messages may be logged. (When BIND10 loads
-the message IDs (and their associated text), if a duplicate ID is found it is
-discarded. However, when the module that supplied the duplicate ID logs that
-particular message, the text supplied by the module that added the original
-ID will be output - something that may bear no relation to the condition being
-logged.
-</p></dd><dt><a name="MSG_IDNOTFND"></a><span class="term">MSG_IDNOTFND could not replace message text for '%1': no such message</span></dt><dd><p>
+</p></dd><dt><a name="LOG_BAD_STREAM"></a><span class="term">LOG_BAD_STREAM bad log console output stream: %1</span></dt><dd><p>
+A log console output stream was given that was not recognized. The output
+stream should be one of "stdout" or "stderr".
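+</p><p>
+For example (with a hypothetical invalid value substituted for the
+placeholder), a misconfigured severity would be reported by the
+LOG_BAD_SEVERITY message above as:
+</p><pre class="screen">
+LOG_BAD_SEVERITY unrecognized log severity: critical
+</pre><p>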
+</p></dd><dt><a name="LOG_DUPLICATE_MESSAGE_ID"></a><span class="term">LOG_DUPLICATE_MESSAGE_ID duplicate message ID (%1) in compiled code</span></dt><dd><p>
+During start-up, BIND10 detected that the given message identification had
+been defined multiple times in the BIND10 code.
+</p><p>
+This has no ill effects other than the possibility that an erroneous
+message may be logged. However, as it is indicative of a programming
+error, please submit a bug report.
+</p></dd><dt><a name="LOG_DUPLICATE_NAMESPACE"></a><span class="term">LOG_DUPLICATE_NAMESPACE line %1: duplicate $NAMESPACE directive found</span></dt><dd><p>
+When reading a message file, more than one $NAMESPACE directive was found.
+Such a condition is regarded as an error and the read will be abandoned.
+</p></dd><dt><a name="LOG_INPUT_OPEN_FAIL"></a><span class="term">LOG_INPUT_OPEN_FAIL unable to open message file %1 for input: %2</span></dt><dd><p>
+The program was not able to open the specified input message file for
+the reason given.
+</p></dd><dt><a name="LOG_INVALID_MESSAGE_ID"></a><span class="term">LOG_INVALID_MESSAGE_ID line %1: invalid message identification '%2'</span></dt><dd><p>
+An invalid message identification (ID) has been found during the read of
+a message file. Message IDs should comprise only alphanumeric characters
+and the underscore, and should not start with a digit.
+</p></dd><dt><a name="LOG_NAMESPACE_EXTRA_ARGS"></a><span class="term">LOG_NAMESPACE_EXTRA_ARGS line %1: $NAMESPACE directive has too many arguments</span></dt><dd><p>
+The $NAMESPACE directive in a message file takes a single argument, a
+namespace in which all the generated symbol names are placed. This error
+is generated when the compiler finds a $NAMESPACE directive with more
+than one argument.
+</p></dd><dt><a name="LOG_NAMESPACE_INVALID_ARG"></a><span class="term">LOG_NAMESPACE_INVALID_ARG line %1: $NAMESPACE directive has an invalid argument ('%2')</span></dt><dd><p>
+The $NAMESPACE argument in a message file should be a valid C++ namespace.
+This message is output if the simple check on the syntax of the string
+carried out by the reader fails.
+</p></dd><dt><a name="LOG_NAMESPACE_NO_ARGS"></a><span class="term">LOG_NAMESPACE_NO_ARGS line %1: no arguments were given to the $NAMESPACE directive</span></dt><dd><p>
+The $NAMESPACE directive in a message file takes a single argument,
+a C++ namespace in which all the generated symbol names are placed.
+This error is generated when the compiler finds a $NAMESPACE directive
+with no arguments.
+</p></dd><dt><a name="LOG_NO_MESSAGE_ID"></a><span class="term">LOG_NO_MESSAGE_ID line %1: message definition line found without a message ID</span></dt><dd><p>
+Within a message file, messages are defined by lines starting with a "%".
+The rest of the line should comprise the message ID and text describing
+the message. This error indicates the message compiler found a line in
+the message file comprising just the "%" and nothing else.
+</p></dd><dt><a name="LOG_NO_MESSAGE_TEXT"></a><span class="term">LOG_NO_MESSAGE_TEXT line %1: line found containing a message ID ('%2') and no text</span></dt><dd><p>
+Within a message file, messages are defined by lines starting with a "%".
+The rest of the line should comprise the message ID and text describing
+the message. This error indicates the message compiler found a line
+in the message file comprising just the "%" and message identification,
+but no text.
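+</p><p>
+As an illustration (the namespace and message ID here are hypothetical),
+a well-formed fragment of a message file could look like this, with the
+$NAMESPACE directive taking a single C++ namespace and each message
+defined on a "%" line as an ID followed by its text:
+</p><pre class="screen">
+$NAMESPACE isc::example
+% EXAMPLE_STARTED example module started (version %1)
+</pre><p>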
+</p></dd><dt><a name="LOG_NO_SUCH_MESSAGE"></a><span class="term">LOG_NO_SUCH_MESSAGE could not replace message text for '%1': no such message</span></dt><dd><p>
During start-up a local message file was read. A line with the listed
-message identification was found in the file, but the identification is not
-one contained in the compiled-in message dictionary. Either the message
-identification has been mis-spelled in the file, or the local file was used
-for an earlier version of the software and the message with that
-identification has been removed.
+message identification was found in the file, but the identification is
+not one contained in the compiled-in message dictionary. This message
+may appear a number of times in the file, once for every such unknown
+message identification.
+</p><p>
+There may be several reasons why this message appears:
+</p><p>
+- The message ID has been mis-spelled in the local message file.
+</p><p>
+- The program outputting the message may not use that particular message
+(e.g. it originates in a module not used by the program).
</p><p>
-This message may appear a number of times in the file, once for every such
-unknown message identification.
-</p></dd><dt><a name="MSG_INVMSGID"></a><span class="term">MSG_INVMSGID line %1: invalid message identification '%2'</span></dt><dd><p>
-The concatenation of the prefix and the message identification is used as
-a symbol in the C++ module; as such it may only contain
-</p></dd><dt><a name="MSG_NOMSGID"></a><span class="term">MSG_NOMSGID line %1: message definition line found without a message ID</span></dt><dd><p>
-Message definition lines are lines starting with a "%". The rest of the line
-should comprise the message ID and text describing the message. This error
-indicates the message compiler found a line in the message file comprising
-just the "%" and nothing else.
-</p></dd><dt><a name="MSG_NOMSGTXT"></a><span class="term">MSG_NOMSGTXT line %1: line found containing a message ID ('%2') and no text</span></dt><dd><p>
-Message definition lines are lines starting with a "%". The rest of the line
-should comprise the message ID and text describing the message. This error
-is generated when a line is found in the message file that contains the
-leading "%" and the message identification but no text.
-</p></dd><dt><a name="MSG_NSEXTRARG"></a><span class="term">MSG_NSEXTRARG line %1: $NAMESPACE directive has too many arguments</span></dt><dd><p>
-The $NAMESPACE directive takes a single argument, a namespace in which all the
-generated symbol names are placed. This error is generated when the
-compiler finds a $NAMESPACE directive with more than one argument.
-</p></dd><dt><a name="MSG_NSINVARG"></a><span class="term">MSG_NSINVARG line %1: $NAMESPACE directive has an invalid argument ('%2')</span></dt><dd><p>
-The $NAMESPACE argument should be a valid C++ namespace. The reader does a
-cursory check on its validity, checking that the characters in the namespace
-are correct. The error is generated when the reader finds an invalid
-character. (Valid are alphanumeric characters, underscores and colons.)
-</p></dd><dt><a name="MSG_NSNOARG"></a><span class="term">MSG_NSNOARG line %1: no arguments were given to the $NAMESPACE directive</span></dt><dd><p>
-The $NAMESPACE directive takes a single argument, a namespace in which all the
-generated symbol names are placed. This error is generated when the
-compiler finds a $NAMESPACE directive with no arguments.
-</p></dd><dt><a name="MSG_OPENIN"></a><span class="term">MSG_OPENIN unable to open message file %1 for input: %2</span></dt><dd><p>
-The program was not able to open the specified input message file for the
-reason given.
-</p></dd><dt><a name="MSG_OPENOUT"></a><span class="term">MSG_OPENOUT unable to open %1 for output: %2</span></dt><dd><p>
-The program was not able to open the specified output file for the reason
-given.
-</p></dd><dt><a name="MSG_PRFEXTRARG"></a><span class="term">MSG_PRFEXTRARG line %1: $PREFIX directive has too many arguments</span></dt><dd><p>
-The $PREFIX directive takes a single argument, a prefix to be added to the
-symbol names when a C++ .h file is created. This error is generated when the
-compiler finds a $PREFIX directive with more than one argument.
-</p></dd><dt><a name="MSG_PRFINVARG"></a><span class="term">MSG_PRFINVARG line %1: $PREFIX directive has an invalid argument ('%2')</span></dt><dd><p>
-The $PREFIX argument is used in a symbol name in a C++ header file. As such,
-it must adhere to restrictions on C++ symbol names (e.g. may only contain
-alphanumeric characters or underscores, and may nor start with a digit).
-A $PREFIX directive was found with an argument (given in the message) that
-violates those restictions.
-</p></dd><dt><a name="MSG_RDLOCMES"></a><span class="term">MSG_RDLOCMES reading local message file %1</span></dt><dd><p>
-This is an informational message output by BIND10 when it starts to read a
-local message file. (A local message file may replace the text of one of more
-messages; the ID of the message will not be changed though.)
-</p></dd><dt><a name="MSG_READERR"></a><span class="term">MSG_READERR error reading from message file %1: %2</span></dt><dd><p>
+- The local file was written for an earlier version of the BIND10 software
+and the later version no longer generates that message.
+</p><p>
+Whatever the reason, there is no impact on the operation of BIND10.
+</p></dd><dt><a name="LOG_OPEN_OUTPUT_FAIL"></a><span class="term">LOG_OPEN_OUTPUT_FAIL unable to open %1 for output: %2</span></dt><dd><p>
+Originating within the logging code, the program was not able to open
+the specified output file for the reason given.
+</p></dd><dt><a name="LOG_PREFIX_EXTRA_ARGS"></a><span class="term">LOG_PREFIX_EXTRA_ARGS line %1: $PREFIX directive has too many arguments</span></dt><dd><p>
+Within a message file, the $PREFIX directive takes a single argument,
+a prefix to be added to the symbol names when a C++ file is created.
+This error is generated when the compiler finds a $PREFIX directive with
+more than one argument.
+</p><p>
+Note: the $PREFIX directive is deprecated and will be removed in a future
+version of BIND10.
+</p></dd><dt><a name="LOG_PREFIX_INVALID_ARG"></a><span class="term">LOG_PREFIX_INVALID_ARG line %1: $PREFIX directive has an invalid argument ('%2')</span></dt><dd><p>
+Within a message file, the $PREFIX directive takes a single argument,
+a prefix to be added to the symbol names when a C++ file is created.
+As such, it must adhere to restrictions on C++ symbol names (e.g. may
+only contain alphanumeric characters or underscores, and may not start
+with a digit). A $PREFIX directive was found with an argument (given
+in the message) that violates those restrictions.
+</p><p>
+Note: the $PREFIX directive is deprecated and will be removed in a future
+version of BIND10.
+</p></dd><dt><a name="LOG_READING_LOCAL_FILE"></a><span class="term">LOG_READING_LOCAL_FILE reading local message file %1</span></dt><dd><p>
+This is an informational message output by BIND10 when it starts to read
+a local message file. (A local message file may replace the text of
+one or more messages; the ID of the message will not be changed though.)
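+</p><p>
+For example, assuming a local message file uses the same "%" definition
+syntax described above, a file containing the single line below would
+replace the text logged for LOG_BAD_SEVERITY while keeping its ID:
+</p><pre class="screen">
+% LOG_BAD_SEVERITY unknown logging severity '%1'; valid values are DEBUG, INFO, WARN, ERROR and FATAL
+</pre><p>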
+</p></dd><dt><a name="LOG_READ_ERROR"></a><span class="term">LOG_READ_ERROR error reading from message file %1: %2</span></dt><dd><p>
The specified error was encountered reading from the named message file.
-</p></dd><dt><a name="MSG_UNRECDIR"></a><span class="term">MSG_UNRECDIR line %1: unrecognised directive '%2'</span></dt><dd><p>
-A line starting with a dollar symbol was found, but the first word on the line
-(shown in the message) was not a recognised message compiler directive.
-</p></dd><dt><a name="MSG_WRITERR"></a><span class="term">MSG_WRITERR error writing to %1: %2</span></dt><dd><p>
-The specified error was encountered by the message compiler when writing to
-the named output file.
-</p></dd><dt><a name="NSAS_INVRESPSTR"></a><span class="term">NSAS_INVRESPSTR queried for %1 but got invalid response</span></dt><dd><p>
-This message indicates an internal error in the nameserver address store
-component (NSAS) of the resolver. The NSAS made a query for a RR for the
-specified nameserver but received an invalid response. Either the success
-function was called without a DNS message or the message was invalid on some
-way. (In the latter case, the error should have been picked up elsewhere in
-the processing logic, hence the raising of the error here.)
-</p></dd><dt><a name="NSAS_INVRESPTC"></a><span class="term">NSAS_INVRESPTC queried for %1 RR of type/class %2/%3, received response %4/%5</span></dt><dd><p>
-This message indicates an internal error in the nameserver address store
-component (NSAS) of the resolver. The NSAS made a query for the given RR
-type and class, but instead received an answer with the given type and class.
-</p></dd><dt><a name="NSAS_LOOKUPCANCEL"></a><span class="term">NSAS_LOOKUPCANCEL lookup for zone %1 has been cancelled</span></dt><dd><p>
-A debug message, this is output when a NSAS (nameserver address store -
-part of the resolver) lookup for a zone has been cancelled.
-</p></dd><dt><a name="NSAS_LOOKUPZONE"></a><span class="term">NSAS_LOOKUPZONE searching NSAS for nameservers for zone %1</span></dt><dd><p>
-A debug message, this is output when a call is made to the nameserver address
-store (part of the resolver) to obtain the nameservers for the specified zone.
-</p></dd><dt><a name="NSAS_NSADDR"></a><span class="term">NSAS_NSADDR asking resolver to obtain A and AAAA records for %1</span></dt><dd><p>
-A debug message, the NSAS (nameserver address store - part of the resolver) is
-making a callback into the resolver to retrieve the address records for the
-specified nameserver.
-</p></dd><dt><a name="NSAS_NSLKUPFAIL"></a><span class="term">NSAS_NSLKUPFAIL failed to lookup any %1 for %2</span></dt><dd><p>
-A debug message, the NSAS (nameserver address store - part of the resolver)
-has been unable to retrieve the specified resource record for the specified
-nameserver. This is not necessarily a problem - the nameserver may be
-unreachable, in which case the NSAS will try other nameservers in the zone.
-</p></dd><dt><a name="NSAS_NSLKUPSUCC"></a><span class="term">NSAS_NSLKUPSUCC found address %1 for %2</span></dt><dd><p>
-A debug message, the NSAS (nameserver address store - part of the resolver)
-has retrieved the given address for the specified nameserver through an
-external query.
-</p></dd><dt><a name="NSAS_SETRTT"></a><span class="term">NSAS_SETRTT reporting RTT for %1 as %2; new value is now %3</span></dt><dd><p>
+</p></dd><dt><a name="LOG_UNRECOGNISED_DIRECTIVE"></a><span class="term">LOG_UNRECOGNISED_DIRECTIVE line %1: unrecognised directive '%2'</span></dt><dd><p>
+Within a message file, a line starting with a dollar symbol was found
+(indicating the presence of a directive) but the first word on the line
+(shown in the message) was not recognised.
+</p></dd><dt><a name="LOG_WRITE_ERROR"></a><span class="term">LOG_WRITE_ERROR error writing to %1: %2</span></dt><dd><p>
+The specified error was encountered by the message compiler when writing
+to the named output file.
+</p></dd><dt><a name="NSAS_FIND_NS_ADDRESS"></a><span class="term">NSAS_FIND_NS_ADDRESS asking resolver to obtain A and AAAA records for %1</span></dt><dd><p>
+A debug message issued when the NSAS (nameserver address store - part
+of the resolver) is making a callback into the resolver to retrieve the
+address records for the specified nameserver.
+</p></dd><dt><a name="NSAS_FOUND_ADDRESS"></a><span class="term">NSAS_FOUND_ADDRESS found address %1 for %2</span></dt><dd><p>
+A debug message issued when the NSAS (nameserver address store - part
+of the resolver) has retrieved the given address for the specified
+nameserver through an external query.
+</p></dd><dt><a name="NSAS_INVALID_RESPONSE"></a><span class="term">NSAS_INVALID_RESPONSE queried for %1 but got invalid response</span></dt><dd><p>
+The NSAS (nameserver address store - part of the resolver) made a query
+for an RR for the specified nameserver but received an invalid response.
+Either the success function was called without a DNS message or the
+message was invalid in some way. (In the latter case, the error should
+have been picked up elsewhere in the processing logic, hence the raising
+of the error here.)
+</p><p>
+This message indicates an internal error in the NSAS. Please raise a
+bug report.
+</p></dd><dt><a name="NSAS_LOOKUP_CANCEL"></a><span class="term">NSAS_LOOKUP_CANCEL lookup for zone %1 has been canceled</span></dt><dd><p>
+A debug message issued when an NSAS (nameserver address store - part of
+the resolver) lookup for a zone has been canceled.
+</p></dd><dt><a name="NSAS_NS_LOOKUP_FAIL"></a><span class="term">NSAS_NS_LOOKUP_FAIL failed to lookup any %1 for %2</span></dt><dd><p>
+A debug message issued when the NSAS (nameserver address store - part of
+the resolver) has been unable to retrieve the specified resource record
+for the specified nameserver. This is not necessarily a problem - the
+nameserver may be unreachable, in which case the NSAS will try other
+nameservers in the zone.
+</p></dd><dt><a name="NSAS_SEARCH_ZONE_NS"></a><span class="term">NSAS_SEARCH_ZONE_NS searching NSAS for nameservers for zone %1</span></dt><dd><p>
+A debug message output when a call is made to the NSAS (nameserver
+address store - part of the resolver) to obtain the nameservers for
+the specified zone.
+</p></dd><dt><a name="NSAS_UPDATE_RTT"></a><span class="term">NSAS_UPDATE_RTT update RTT for %1: was %2 ms, is now %3 ms</span></dt><dd><p>
A NSAS (nameserver address store - part of the resolver) debug message
-reporting the round-trip time (RTT) for a query made to the specified
-nameserver. The RTT has been updated using the value given and the new RTT is
-displayed. (The RTT is subject to a calculation that damps out sudden
-changes. As a result, the new RTT is not necessarily equal to the RTT
-reported.)
+reporting the update of a round-trip time (RTT) for a query made to the
+specified nameserver. The RTT has been updated using the value given
+and the new RTT is displayed. (The RTT is subject to a calculation that
+damps out sudden changes. As a result, the new RTT used by the NSAS in
+future decisions of which nameserver to use is not necessarily equal to
+the RTT reported.)
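+</p><p>
+As a purely illustrative worked example (the damping factor is an
+assumption, not the actual NSAS formula): with a weight of 0.7 on the
+old value, an old RTT of 80 ms and a measured RTT of 200 ms would give
+</p><pre class="screen">
+new RTT = 0.7 * 80 ms + 0.3 * 200 ms = 116 ms
+</pre><p>
+so the message would report "was 80 ms, is now 116 ms" rather than the
+measured 200 ms.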
+</p></dd><dt><a name="NSAS_WRONG_ANSWER"></a><span class="term">NSAS_WRONG_ANSWER queried for %1 RR of type/class %2/%3, received response %4/%5</span></dt><dd><p>
+The NSAS (nameserver address store - part of the resolver) made a query for
+a resource record of a particular type and class, but instead received
+an answer of a different type and class (both are given in the message).
+</p><p>
+This message indicates an internal error in the NSAS. Please raise a
+bug report.
</p></dd><dt><a name="RESLIB_ANSWER"></a><span class="term">RESLIB_ANSWER answer received in response to query for <%1></span></dt><dd><p>
A debug message recording that an answer has been received to an upstream
query for the specified question. Previous debug messages will have indicated
@@ -599,95 +932,95 @@ the server to which the question was sent.
</p></dd><dt><a name="RESLIB_DEEPEST"></a><span class="term">RESLIB_DEEPEST did not find <%1> in cache, deepest delegation found is %2</span></dt><dd><p>
A debug message, a cache lookup did not find the specified <name, class,
type> tuple in the cache; instead, the deepest delegation found is indicated.
-</p></dd><dt><a name="RESLIB_FOLLOWCNAME"></a><span class="term">RESLIB_FOLLOWCNAME following CNAME chain to <%1></span></dt><dd><p>
+</p></dd><dt><a name="RESLIB_FOLLOW_CNAME"></a><span class="term">RESLIB_FOLLOW_CNAME following CNAME chain to <%1></span></dt><dd><p>
A debug message, a CNAME response was received and another query is being issued
for the <name, class, type> tuple.
-</p></dd><dt><a name="RESLIB_LONGCHAIN"></a><span class="term">RESLIB_LONGCHAIN CNAME received in response to query for <%1>: CNAME chain length exceeded</span></dt><dd><p>
+</p></dd><dt><a name="RESLIB_LONG_CHAIN"></a><span class="term">RESLIB_LONG_CHAIN CNAME received in response to query for <%1>: CNAME chain length exceeded</span></dt><dd><p>
A debug message recording that a CNAME response has been received to an upstream
query for the specified question (Previous debug messages will have indicated
the server to which the question was sent). However, receipt of this CNAME
has meant that the resolver has exceeded the CNAME chain limit (a CNAME chain
is where one CNAME points to another) and so an error is being returned.
-</p></dd><dt><a name="RESLIB_NONSRRSET"></a><span class="term">RESLIB_NONSRRSET no NS RRSet in referral response received to query for <%1></span></dt><dd><p>
+</p></dd><dt><a name="RESLIB_NO_NS_RRSET"></a><span class="term">RESLIB_NO_NS_RRSET no NS RRSet in referral response received to query for <%1></span></dt><dd><p>
A debug message, this indicates that a response was received for the specified
-query and was categorised as a referral. However, the received message did
+query and was categorized as a referral. However, the received message did
not contain any NS RRsets. This may indicate a programming error in the
response classification code.
-</p></dd><dt><a name="RESLIB_NSASLOOK"></a><span class="term">RESLIB_NSASLOOK looking up nameserver for zone %1 in the NSAS</span></dt><dd><p>
+</p></dd><dt><a name="RESLIB_NSAS_LOOKUP"></a><span class="term">RESLIB_NSAS_LOOKUP looking up nameserver for zone %1 in the NSAS</span></dt><dd><p>
A debug message, the RunningQuery object is querying the NSAS for the
nameservers for the specified zone.
-</p></dd><dt><a name="RESLIB_NXDOMRR"></a><span class="term">RESLIB_NXDOMRR NXDOMAIN/NXRRSET received in response to query for <%1></span></dt><dd><p>
+</p></dd><dt><a name="RESLIB_NXDOM_NXRR"></a><span class="term">RESLIB_NXDOM_NXRR NXDOMAIN/NXRRSET received in response to query for <%1></span></dt><dd><p>
A debug message recording that either a NXDOMAIN or an NXRRSET response has
been received to an upstream query for the specified question. Previous debug
messages will have indicated the server to which the question was sent.
</p></dd><dt><a name="RESLIB_PROTOCOL"></a><span class="term">RESLIB_PROTOCOL protocol error in answer for %1: %3</span></dt><dd><p>
A debug message indicating that a protocol error was received. As there
are no retries left, an error will be reported.
-</p></dd><dt><a name="RESLIB_PROTOCOLRTRY"></a><span class="term">RESLIB_PROTOCOLRTRY protocol error in answer for %1: %2 (retries left: %3)</span></dt><dd><p>
+</p></dd><dt><a name="RESLIB_PROTOCOL_RETRY"></a><span class="term">RESLIB_PROTOCOL_RETRY protocol error in answer for %1: %2 (retries left: %3)</span></dt><dd><p>
A debug message indicating that a protocol error was received and that
the resolver is repeating the query to the same nameserver. After this
repeated query, there will be the indicated number of retries left.
-</p></dd><dt><a name="RESLIB_RCODERR"></a><span class="term">RESLIB_RCODERR RCODE indicates error in response to query for <%1></span></dt><dd><p>
+</p></dd><dt><a name="RESLIB_RCODE_ERR"></a><span class="term">RESLIB_RCODE_ERR RCODE indicates error in response to query for <%1></span></dt><dd><p>
A debug message, the response to the specified query indicated an error
that is not covered by a specific code path. A SERVFAIL will be returned.
-</p></dd><dt><a name="RESLIB_REFERRAL"></a><span class="term">RESLIB_REFERRAL referral received in response to query for <%1></span></dt><dd><p>
-A debug message recording that a referral response has been received to an
-upstream query for the specified question. Previous debug messages will
-have indicated the server to which the question was sent.
-</p></dd><dt><a name="RESLIB_REFERZONE"></a><span class="term">RESLIB_REFERZONE referred to zone %1</span></dt><dd><p>
-A debug message indicating that the last referral message was to the specified
-zone.
-</p></dd><dt><a name="RESLIB_RESCAFND"></a><span class="term">RESLIB_RESCAFND found <%1> in the cache (resolve() instance %2)</span></dt><dd><p>
+</p></dd><dt><a name="RESLIB_RECQ_CACHE_FIND"></a><span class="term">RESLIB_RECQ_CACHE_FIND found <%1> in the cache (resolve() instance %2)</span></dt><dd><p>
This is a debug message and indicates that a RecursiveQuery object found
the specified <name, class, type> tuple in the cache. The instance number
at the end of the message indicates which of the two resolve() methods has
been called.
-</p></dd><dt><a name="RESLIB_RESCANOTFND"></a><span class="term">RESLIB_RESCANOTFND did not find <%1> in the cache, starting RunningQuery (resolve() instance %2)</span></dt><dd><p>
+</p></dd><dt><a name="RESLIB_RECQ_CACHE_NO_FIND"></a><span class="term">RESLIB_RECQ_CACHE_NO_FIND did not find <%1> in the cache, starting RunningQuery (resolve() instance %2)</span></dt><dd><p>
This is a debug message and indicates that the cache lookup made by the
RecursiveQuery::resolve() method did not find an answer, so a new RunningQuery
object has been created to resolve the question. The instance number at
the end of the message indicates which of the two resolve() methods has
been called.
+</p></dd><dt><a name="RESLIB_REFERRAL"></a><span class="term">RESLIB_REFERRAL referral received in response to query for <%1></span></dt><dd><p>
+A debug message recording that a referral response has been received to an
+upstream query for the specified question. Previous debug messages will
+have indicated the server to which the question was sent.
+</p></dd><dt><a name="RESLIB_REFER_ZONE"></a><span class="term">RESLIB_REFER_ZONE referred to zone %1</span></dt><dd><p>
+A debug message indicating that the last referral message was to the specified
+zone.
</p></dd><dt><a name="RESLIB_RESOLVE"></a><span class="term">RESLIB_RESOLVE asked to resolve <%1> (resolve() instance %2)</span></dt><dd><p>
A debug message, the RecursiveQuery::resolve method has been called to resolve
the specified <name, class, type> tuple. The first action will be to look up
the specified tuple in the cache. The instance number at the end of the
message indicates which of the two resolve() methods has been called.
-</p></dd><dt><a name="RESLIB_RRSETFND"></a><span class="term">RESLIB_RRSETFND found single RRset in the cache when querying for <%1> (resolve() instance %2)</span></dt><dd><p>
+</p></dd><dt><a name="RESLIB_RRSET_FOUND"></a><span class="term">RESLIB_RRSET_FOUND found single RRset in the cache when querying for <%1> (resolve() instance %2)</span></dt><dd><p>
A debug message, indicating that when RecursiveQuery::resolve queried the
cache, a single RRset was found which was put in the answer. The instance
number at the end of the message indicates which of the two resolve()
methods has been called.
</p></dd><dt><a name="RESLIB_RTT"></a><span class="term">RESLIB_RTT round-trip time of last query calculated as %1 ms</span></dt><dd><p>
A debug message giving the round-trip time of the last query and response.
-</p></dd><dt><a name="RESLIB_RUNCAFND"></a><span class="term">RESLIB_RUNCAFND found <%1> in the cache</span></dt><dd><p>
+</p></dd><dt><a name="RESLIB_RUNQ_CACHE_FIND"></a><span class="term">RESLIB_RUNQ_CACHE_FIND found <%1> in the cache</span></dt><dd><p>
This is a debug message and indicates that a RunningQuery object found
the specified <name, class, type> tuple in the cache.
-</p></dd><dt><a name="RESLIB_RUNCALOOK"></a><span class="term">RESLIB_RUNCALOOK looking up up <%1> in the cache</span></dt><dd><p>
+</p></dd><dt><a name="RESLIB_RUNQ_CACHE_LOOKUP"></a><span class="term">RESLIB_RUNQ_CACHE_LOOKUP looking up up <%1> in the cache</span></dt><dd><p>
This is a debug message and indicates that a RunningQuery object has made
a call to its doLookup() method to look up the specified <name, class, type>
tuple, the first action of which will be to examine the cache.
-</p></dd><dt><a name="RESLIB_RUNQUFAIL"></a><span class="term">RESLIB_RUNQUFAIL failure callback - nameservers are unreachable</span></dt><dd><p>
+</p></dd><dt><a name="RESLIB_RUNQ_FAIL"></a><span class="term">RESLIB_RUNQ_FAIL failure callback - nameservers are unreachable</span></dt><dd><p>
A debug message indicating that a RunningQuery's failure callback has been
called because all nameservers for the zone in question are unreachable.
-</p></dd><dt><a name="RESLIB_RUNQUSUCC"></a><span class="term">RESLIB_RUNQUSUCC success callback - sending query to %1</span></dt><dd><p>
+</p></dd><dt><a name="RESLIB_RUNQ_SUCCESS"></a><span class="term">RESLIB_RUNQ_SUCCESS success callback - sending query to %1</span></dt><dd><p>
A debug message indicating that a RunningQuery's success callback has been
called because a nameserver has been found, and that a query is being sent
to the specified nameserver.
-</p></dd><dt><a name="RESLIB_TESTSERV"></a><span class="term">RESLIB_TESTSERV setting test server to %1(%2)</span></dt><dd><p>
+</p></dd><dt><a name="RESLIB_TEST_SERVER"></a><span class="term">RESLIB_TEST_SERVER setting test server to %1(%2)</span></dt><dd><p>
This is an internal debugging message and is only generated in unit tests.
It indicates that all upstream queries from the resolver are being routed to
the specified server, regardless of the address of the nameserver to which
the query would normally be routed. As it should never be seen in normal
operation, it is a warning message instead of a debug message.
-</p></dd><dt><a name="RESLIB_TESTUPSTR"></a><span class="term">RESLIB_TESTUPSTR sending upstream query for <%1> to test server at %2</span></dt><dd><p>
+</p></dd><dt><a name="RESLIB_TEST_UPSTREAM"></a><span class="term">RESLIB_TEST_UPSTREAM sending upstream query for <%1> to test server at %2</span></dt><dd><p>
This is a debug message and should only be seen in unit tests. A query for
the specified <name, class, type> tuple is being sent to a test nameserver
whose address is given in the message.
</p></dd><dt><a name="RESLIB_TIMEOUT"></a><span class="term">RESLIB_TIMEOUT query <%1> to %2 timed out</span></dt><dd><p>
A debug message indicating that the specified query has timed out and as
there are no retries left, an error will be reported.
-</p></dd><dt><a name="RESLIB_TIMEOUTRTRY"></a><span class="term">RESLIB_TIMEOUTRTRY query <%1> to %2 timed out, re-trying (retries left: %3)</span></dt><dd><p>
+</p></dd><dt><a name="RESLIB_TIMEOUT_RETRY"></a><span class="term">RESLIB_TIMEOUT_RETRY query <%1> to %2 timed out, re-trying (retries left: %3)</span></dt><dd><p>
A debug message indicating that the specified query has timed out and that
the resolver is repeating the query to the same nameserver. After this
repeated query, there will be the indicated number of retries left.
@@ -699,118 +1032,134 @@ gives no cause for concern.
</p></dd><dt><a name="RESLIB_UPSTREAM"></a><span class="term">RESLIB_UPSTREAM sending upstream query for <%1> to %2</span></dt><dd><p>
A debug message indicating that a query for the specified <name, class, type>
tuple is being sent to a nameserver whose address is given in the message.
-</p></dd><dt><a name="RESOLVER_AXFRTCP"></a><span class="term">RESOLVER_AXFRTCP AXFR request received over TCP</span></dt><dd><p>
+</p></dd><dt><a name="RESOLVER_AXFR_TCP"></a><span class="term">RESOLVER_AXFR_TCP AXFR request received over TCP</span></dt><dd><p>
A debug message, the resolver received an AXFR request over TCP. The server
cannot process it and will return an error message to the sender with the
RCODE set to NOTIMP.
-</p></dd><dt><a name="RESOLVER_AXFRUDP"></a><span class="term">RESOLVER_AXFRUDP AXFR request received over UDP</span></dt><dd><p>
+</p></dd><dt><a name="RESOLVER_AXFR_UDP"></a><span class="term">RESOLVER_AXFR_UDP AXFR request received over UDP</span></dt><dd><p>
A debug message, the resolver received an AXFR request over UDP. The server
cannot process it (and in any case, an AXFR request should be sent over TCP)
and will return an error message to the sender with the RCODE set to FORMERR.
-</p></dd><dt><a name="RESOLVER_CLTMOSMALL"></a><span class="term">RESOLVER_CLTMOSMALL client timeout of %1 is too small</span></dt><dd><p>
+</p></dd><dt><a name="RESOLVER_CLIENT_TIME_SMALL"></a><span class="term">RESOLVER_CLIENT_TIME_SMALL client timeout of %1 is too small</span></dt><dd><p>
An error indicating that the configuration value specified for the query
timeout is too small.
-</p></dd><dt><a name="RESOLVER_CONFIGCHAN"></a><span class="term">RESOLVER_CONFIGCHAN configuration channel created</span></dt><dd><p>
+</p></dd><dt><a name="RESOLVER_CONFIG_CHANNEL"></a><span class="term">RESOLVER_CONFIG_CHANNEL configuration channel created</span></dt><dd><p>
A debug message, output when the resolver has successfully established a
connection to the configuration channel.
-</p></dd><dt><a name="RESOLVER_CONFIGERR"></a><span class="term">RESOLVER_CONFIGERR error in configuration: %1</span></dt><dd><p>
+</p></dd><dt><a name="RESOLVER_CONFIG_ERROR"></a><span class="term">RESOLVER_CONFIG_ERROR error in configuration: %1</span></dt><dd><p>
An error was detected in a configuration update received by the resolver. This
may be in the format of the configuration message (in which case this is a
programming error) or it may be in the data supplied (in which case it is
a user error). The reason for the error, given as a parameter in the message,
will give more details.
-</p></dd><dt><a name="RESOLVER_CONFIGLOAD"></a><span class="term">RESOLVER_CONFIGLOAD configuration loaded</span></dt><dd><p>
+</p></dd><dt><a name="RESOLVER_CONFIG_LOADED"></a><span class="term">RESOLVER_CONFIG_LOADED configuration loaded</span></dt><dd><p>
A debug message, output when the resolver configuration has been successfully
loaded.
-</p></dd><dt><a name="RESOLVER_CONFIGUPD"></a><span class="term">RESOLVER_CONFIGUPD configuration updated: %1</span></dt><dd><p>
+</p></dd><dt><a name="RESOLVER_CONFIG_UPDATED"></a><span class="term">RESOLVER_CONFIG_UPDATED configuration updated: %1</span></dt><dd><p>
A debug message, the configuration has been updated with the specified
information.
</p></dd><dt><a name="RESOLVER_CREATED"></a><span class="term">RESOLVER_CREATED main resolver object created</span></dt><dd><p>
A debug message, output when the Resolver() object has been created.
-</p></dd><dt><a name="RESOLVER_DNSMSGRCVD"></a><span class="term">RESOLVER_DNSMSGRCVD DNS message received: %1</span></dt><dd><p>
+</p></dd><dt><a name="RESOLVER_DNS_MESSAGE_RECEIVED"></a><span class="term">RESOLVER_DNS_MESSAGE_RECEIVED DNS message received: %1</span></dt><dd><p>
A debug message, this always precedes some other logging message and is the
formatted contents of the DNS packet that the other message refers to.
-</p></dd><dt><a name="RESOLVER_DNSMSGSENT"></a><span class="term">RESOLVER_DNSMSGSENT DNS message of %1 bytes sent: %2</span></dt><dd><p>
+</p></dd><dt><a name="RESOLVER_DNS_MESSAGE_SENT"></a><span class="term">RESOLVER_DNS_MESSAGE_SENT DNS message of %1 bytes sent: %2</span></dt><dd><p>
A debug message, this contains details of the response sent back to the querying
system.
</p></dd><dt><a name="RESOLVER_FAILED"></a><span class="term">RESOLVER_FAILED resolver failed, reason: %1</span></dt><dd><p>
This is an error message output when an unhandled exception is caught by the
resolver. All it can do is to shut down.
-</p></dd><dt><a name="RESOLVER_FWDADDR"></a><span class="term">RESOLVER_FWDADDR setting forward address %1(%2)</span></dt><dd><p>
+</p></dd><dt><a name="RESOLVER_FORWARD_ADDRESS"></a><span class="term">RESOLVER_FORWARD_ADDRESS setting forward address %1(%2)</span></dt><dd><p>
This message may appear multiple times during startup, and it lists the
forward addresses used by the resolver when running in forwarding mode.
-</p></dd><dt><a name="RESOLVER_FWDQUERY"></a><span class="term">RESOLVER_FWDQUERY processing forward query</span></dt><dd><p>
+</p></dd><dt><a name="RESOLVER_FORWARD_QUERY"></a><span class="term">RESOLVER_FORWARD_QUERY processing forward query</span></dt><dd><p>
The received query has passed all checks and is being forwarded to upstream
servers.
-</p></dd><dt><a name="RESOLVER_HDRERR"></a><span class="term">RESOLVER_HDRERR message received, exception when processing header: %1</span></dt><dd><p>
+</p></dd><dt><a name="RESOLVER_HEADER_ERROR"></a><span class="term">RESOLVER_HEADER_ERROR message received, exception when processing header: %1</span></dt><dd><p>
A debug message noting that an exception occurred during the processing of
a received packet. The packet has been dropped.
</p></dd><dt><a name="RESOLVER_IXFR"></a><span class="term">RESOLVER_IXFR IXFR request received</span></dt><dd><p>
The resolver received an IXFR request. The server cannot process it
and will return an error message to the sender with the RCODE set to NOTIMP.
-</p></dd><dt><a name="RESOLVER_LKTMOSMALL"></a><span class="term">RESOLVER_LKTMOSMALL lookup timeout of %1 is too small</span></dt><dd><p>
+</p></dd><dt><a name="RESOLVER_LOOKUP_TIME_SMALL"></a><span class="term">RESOLVER_LOOKUP_TIME_SMALL lookup timeout of %1 is too small</span></dt><dd><p>
An error indicating that the configuration value specified for the lookup
timeout is too small.
-</p></dd><dt><a name="RESOLVER_NFYNOTAUTH"></a><span class="term">RESOLVER_NFYNOTAUTH NOTIFY arrived but server is not authoritative</span></dt><dd><p>
-The resolver received a NOTIFY message. As the server is not authoritative it
-cannot process it, so it returns an error message to the sender with the RCODE
-set to NOTAUTH.
-</p></dd><dt><a name="RESOLVER_NORMQUERY"></a><span class="term">RESOLVER_NORMQUERY processing normal query</span></dt><dd><p>
-The received query has passed all checks and is being processed by the resolver.
-</p></dd><dt><a name="RESOLVER_NOROOTADDR"></a><span class="term">RESOLVER_NOROOTADDR no root addresses available</span></dt><dd><p>
-A warning message during startup, indicates that no root addresses have been
-set. This may be because the resolver will get them from a priming query.
-</p></dd><dt><a name="RESOLVER_NOTIN"></a><span class="term">RESOLVER_NOTIN non-IN class request received, returning REFUSED message</span></dt><dd><p>
+</p></dd><dt><a name="RESOLVER_MESSAGE_ERROR"></a><span class="term">RESOLVER_MESSAGE_ERROR error parsing received message: %1 - returning %2</span></dt><dd><p>
+A debug message noting that the resolver received a message and the
+parsing of the body of the message failed due to some error (although
+the parsing of the header succeeded). The message parameters give a
+textual description of the problem and the RCODE returned.
+</p></dd><dt><a name="RESOLVER_NEGATIVE_RETRIES"></a><span class="term">RESOLVER_NEGATIVE_RETRIES negative number of retries (%1) specified in the configuration</span></dt><dd><p>
+An error message indicating that the resolver configuration has specified a
+negative retry count. Only zero or positive values are valid.
+</p></dd><dt><a name="RESOLVER_NON_IN_PACKET"></a><span class="term">RESOLVER_NON_IN_PACKET non-IN class request received, returning REFUSED message</span></dt><dd><p>
A debug message, the resolver has received a DNS packet that was not IN class.
The resolver cannot handle such packets, so is returning a REFUSED response to
the sender.
-</p></dd><dt><a name="RESOLVER_NOTONEQUES"></a><span class="term">RESOLVER_NOTONEQUES query contained %1 questions, exactly one question was expected</span></dt><dd><p>
+</p></dd><dt><a name="RESOLVER_NORMAL_QUERY"></a><span class="term">RESOLVER_NORMAL_QUERY processing normal query</span></dt><dd><p>
+The received query has passed all checks and is being processed by the resolver.
+</p></dd><dt><a name="RESOLVER_NOTIFY_RECEIVED"></a><span class="term">RESOLVER_NOTIFY_RECEIVED NOTIFY arrived but server is not authoritative</span></dt><dd><p>
+The resolver received a NOTIFY message. As the server is not authoritative it
+cannot process it, so it returns an error message to the sender with the RCODE
+set to NOTAUTH.
+</p></dd><dt><a name="RESOLVER_NOT_ONE_QUESTION"></a><span class="term">RESOLVER_NOT_ONE_QUESTION query contained %1 questions, exactly one question was expected</span></dt><dd><p>
A debug message, the resolver received a query that contained the number of
entries in the question section detailed in the message. This is a malformed
message, as a DNS query must contain only one question. The resolver will
return a message to the sender with the RCODE set to FORMERR.
-</p></dd><dt><a name="RESOLVER_OPCODEUNS"></a><span class="term">RESOLVER_OPCODEUNS opcode %1 not supported by the resolver</span></dt><dd><p>
-A debug message, the resolver received a message with an unsupported opcode
-(it can only process QUERY opcodes). It will return a message to the sender
-with the RCODE set to NOTIMP.
-</p></dd><dt><a name="RESOLVER_PARSEERR"></a><span class="term">RESOLVER_PARSEERR error parsing received message: %1 - returning %2</span></dt><dd><p>
+</p></dd><dt><a name="RESOLVER_NO_ROOT_ADDRESS"></a><span class="term">RESOLVER_NO_ROOT_ADDRESS no root addresses available</span></dt><dd><p>
+A warning message during startup, indicates that no root addresses have been
+set. This may be because the resolver will get them from a priming query.
+</p></dd><dt><a name="RESOLVER_PARSE_ERROR"></a><span class="term">RESOLVER_PARSE_ERROR error parsing received message: %1 - returning %2</span></dt><dd><p>
A debug message noting that the resolver received a message and the parsing
of the body of the message failed due to some non-protocol related reason
(although the parsing of the header succeeded). The message parameters give
a textual description of the problem and the RCODE returned.
-</p></dd><dt><a name="RESOLVER_PRINTMSG"></a><span class="term">RESOLVER_PRINTMSG print message command, aeguments are: %1</span></dt><dd><p>
+</p></dd><dt><a name="RESOLVER_PRINT_COMMAND"></a><span class="term">RESOLVER_PRINT_COMMAND print message command, arguments are: %1</span></dt><dd><p>
This message is logged when a "print_message" command is received over the
command channel.
-</p></dd><dt><a name="RESOLVER_PROTERR"></a><span class="term">RESOLVER_PROTERR protocol error parsing received message: %1 - returning %2</span></dt><dd><p>
+</p></dd><dt><a name="RESOLVER_PROTOCOL_ERROR"></a><span class="term">RESOLVER_PROTOCOL_ERROR protocol error parsing received message: %1 - returning %2</span></dt><dd><p>
A debug message noting that the resolver received a message and the parsing
of the body of the message failed due to some protocol error (although the
parsing of the header succeeded). The message parameters give a textual
description of the problem and the RCODE returned.
-</p></dd><dt><a name="RESOLVER_QUSETUP"></a><span class="term">RESOLVER_QUSETUP query setup</span></dt><dd><p>
+</p></dd><dt><a name="RESOLVER_QUERY_ACCEPTED"></a><span class="term">RESOLVER_QUERY_ACCEPTED query accepted: '%1/%2/%3' from %4</span></dt><dd><p>
+A debug message indicating that an incoming query has been accepted by
+the query ACL. The log message shows the query in the form of
+<query name>/<query type>/<query class>, and the client that sent the
+query in the form of <source IP address>#<source port>.
+</p></dd><dt><a name="RESOLVER_QUERY_DROPPED"></a><span class="term">RESOLVER_QUERY_DROPPED query dropped: '%1/%2/%3' from %4</span></dt><dd><p>
+An informational message indicating that an incoming query has been
+dropped as a result of the query ACL. Unlike the RESOLVER_QUERY_REJECTED
+case, the server does not return any response. The log message
+shows the query in the form of <query name>/<query type>/<query
+class>, and the client that sent the query in the form of <source
+IP address>#<source port>.
+</p></dd><dt><a name="RESOLVER_QUERY_REJECTED"></a><span class="term">RESOLVER_QUERY_REJECTED query rejected: '%1/%2/%3' from %4</span></dt><dd><p>
+An informational message indicating that an incoming query has been
+rejected by the query ACL. This results in a response with an RCODE of
+REFUSED. The log message shows the query in the form of <query
+name>/<query type>/<query class>, and the client that sent the
+query in the form of <source IP address>#<source port>.
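+</p><p>
+For example (with hypothetical values substituted for the placeholders),
+a query for www.example.com rejected by the ACL would be logged as:
+</p><pre class="screen">
+RESOLVER_QUERY_REJECTED query rejected: 'www.example.com/A/IN' from 192.0.2.1#53124
+</pre><p>
+and the client would receive a response with the RCODE set to REFUSED.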
+</p></dd><dt><a name="RESOLVER_QUERY_SETUP"></a><span class="term">RESOLVER_QUERY_SETUP query setup</span></dt><dd><p>
A debug message noting that the resolver is creating a RecursiveQuery object.
-</p></dd><dt><a name="RESOLVER_QUSHUT"></a><span class="term">RESOLVER_QUSHUT query shutdown</span></dt><dd><p>
+</p></dd><dt><a name="RESOLVER_QUERY_SHUTDOWN"></a><span class="term">RESOLVER_QUERY_SHUTDOWN query shutdown</span></dt><dd><p>
A debug message noting that the resolver is destroying a RecursiveQuery object.
-</p></dd><dt><a name="RESOLVER_QUTMOSMALL"></a><span class="term">RESOLVER_QUTMOSMALL query timeout of %1 is too small</span></dt><dd><p>
+</p></dd><dt><a name="RESOLVER_QUERY_TIME_SMALL"></a><span class="term">RESOLVER_QUERY_TIME_SMALL query timeout of %1 is too small</span></dt><dd><p>
An error indicating that the configuration value specified for the query
timeout is too small.
-</p></dd><dt><a name="RESOLVER_RECURSIVE"></a><span class="term">RESOLVER_RECURSIVE running in recursive mode</span></dt><dd><p>
-This is an informational message that appears at startup noting that the
-resolver is running in recursive mode.
-</p></dd><dt><a name="RESOLVER_RECVMSG"></a><span class="term">RESOLVER_RECVMSG resolver has received a DNS message</span></dt><dd><p>
+</p></dd><dt><a name="RESOLVER_RECEIVED_MESSAGE"></a><span class="term">RESOLVER_RECEIVED_MESSAGE resolver has received a DNS message</span></dt><dd><p>
A debug message indicating that the resolver has received a message. Depending
on the debug settings, subsequent log output will indicate the nature of the
message.
-</p></dd><dt><a name="RESOLVER_RETRYNEG"></a><span class="term">RESOLVER_RETRYNEG negative number of retries (%1) specified in the configuration</span></dt><dd><p>
-An error message indicating that the resolver configuration has specified a
-negative retry count. Only zero or positive values are valid.
-</p></dd><dt><a name="RESOLVER_ROOTADDR"></a><span class="term">RESOLVER_ROOTADDR setting root address %1(%2)</span></dt><dd><p>
-This message may appear multiple times during startup; it lists the root
-addresses used by the resolver.
-</p></dd><dt><a name="RESOLVER_SERVICE"></a><span class="term">RESOLVER_SERVICE service object created</span></dt><dd><p>
+</p></dd><dt><a name="RESOLVER_RECURSIVE"></a><span class="term">RESOLVER_RECURSIVE running in recursive mode</span></dt><dd><p>
+This is an informational message that appears at startup noting that the
+resolver is running in recursive mode.
+</p></dd><dt><a name="RESOLVER_SERVICE_CREATED"></a><span class="term">RESOLVER_SERVICE_CREATED service object created</span></dt><dd><p>
A debug message, output when the main service object (which handles the
received queries) is created.
-</p></dd><dt><a name="RESOLVER_SETPARAM"></a><span class="term">RESOLVER_SETPARAM query timeout: %1, client timeout: %2, lookup timeout: %3, retry count: %4</span></dt><dd><p>
-A debug message, lists the parameters associated with the message. These are:
+</p></dd><dt><a name="RESOLVER_SET_PARAMS"></a><span class="term">RESOLVER_SET_PARAMS query timeout: %1, client timeout: %2, lookup timeout: %3, retry count: %4</span></dt><dd><p>
+A debug message listing the parameters being set for the resolver. These are:
query timeout: the timeout (in ms) used for queries originated by the resolver
to upstream servers. Client timeout: the interval to resolve a query by
a client: after this time, the resolver sends back a SERVFAIL to the client
@@ -819,14 +1168,20 @@ resolver gives up trying to resolve a query. Retry count: the number of times
the resolver will retry a query to an upstream server if it gets a timeout.
</p><p>
The client and lookup timeouts require a bit more explanation. The
-resolution of the clent query might require a large number of queries to
+resolution of the client query might require a large number of queries to
upstream nameservers. Even if none of these queries timeout, the total time
taken to perform all the queries may exceed the client timeout. When this
happens, a SERVFAIL is returned to the client, but the resolver continues
with the resolution process. Data received is added to the cache. However,
-there comes a time - the lookup timeout - when even the resolve gives up.
+there comes a time - the lookup timeout - when even the resolver gives up.
At this point it will wait for pending upstream queries to complete or
timeout and drop the query.
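+</p><p>
+As an illustration with example values (these are chosen for the example and
+are not necessarily the defaults): with a client timeout of 4000 ms and a
+lookup timeout of 30000 ms, a resolution that is still in progress after four
+seconds causes a SERVFAIL to be sent to the client while the resolver carries
+on and caches whatever answers it receives; if the resolution is still not
+finished after thirty seconds, the resolver gives up on it completely. The
+values can be changed at runtime with bindctl; the item name below is an
+assumption and should be checked against the Resolver module specification:
+</p><pre>
+> config set Resolver/timeout_client 4000
+> config commit
+</pre><p>
+The other parameters listed in this message can be adjusted in the same way.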
+</p></dd><dt><a name="RESOLVER_SET_QUERY_ACL"></a><span class="term">RESOLVER_SET_QUERY_ACL query ACL is configured</span></dt><dd><p>
+A debug message that appears when a new query ACL is configured for the
+resolver.
+</p></dd><dt><a name="RESOLVER_SET_ROOT_ADDRESS"></a><span class="term">RESOLVER_SET_ROOT_ADDRESS setting root address %1(%2)</span></dt><dd><p>
+This message may appear multiple times during startup; it lists the root
+addresses used by the resolver.
</p></dd><dt><a name="RESOLVER_SHUTDOWN"></a><span class="term">RESOLVER_SHUTDOWN resolver shutdown complete</span></dt><dd><p>
This information message is output when the resolver has shut down.
</p></dd><dt><a name="RESOLVER_STARTED"></a><span class="term">RESOLVER_STARTED resolver started</span></dt><dd><p>
@@ -834,8 +1189,166 @@ This informational message is output by the resolver when all initialization
has been completed and it is entering its main loop.
</p></dd><dt><a name="RESOLVER_STARTING"></a><span class="term">RESOLVER_STARTING starting resolver with command line '%1'</span></dt><dd><p>
An informational message, this is output when the resolver starts up.
-</p></dd><dt><a name="RESOLVER_UNEXRESP"></a><span class="term">RESOLVER_UNEXRESP received unexpected response, ignoring</span></dt><dd><p>
+</p></dd><dt><a name="RESOLVER_UNEXPECTED_RESPONSE"></a><span class="term">RESOLVER_UNEXPECTED_RESPONSE received unexpected response, ignoring</span></dt><dd><p>
A debug message noting that the server has received a response instead of a
query and is ignoring it.
+</p></dd><dt><a name="RESOLVER_UNSUPPORTED_OPCODE"></a><span class="term">RESOLVER_UNSUPPORTED_OPCODE opcode %1 not supported by the resolver</span></dt><dd><p>
+A debug message, the resolver received a message with an unsupported opcode
+(it can only process QUERY opcodes). It will return a message to the sender
+with the RCODE set to NOTIMP.
+</p></dd><dt><a name="XFRIN_AXFR_DATABASE_FAILURE"></a><span class="term">XFRIN_AXFR_DATABASE_FAILURE AXFR transfer of zone %1 failed: %2</span></dt><dd><p>
+The AXFR transfer for the given zone has failed due to a database problem.
+The error is shown in the log message.
+</p></dd><dt><a name="XFRIN_AXFR_INTERNAL_FAILURE"></a><span class="term">XFRIN_AXFR_INTERNAL_FAILURE AXFR transfer of zone %1 failed: %2</span></dt><dd><p>
+The AXFR transfer for the given zone has failed due to an internal
+problem in the bind10 python wrapper library.
+The error is shown in the log message.
+</p></dd><dt><a name="XFRIN_AXFR_TRANSFER_FAILURE"></a><span class="term">XFRIN_AXFR_TRANSFER_FAILURE AXFR transfer of zone %1 failed: %2</span></dt><dd><p>
+The AXFR transfer for the given zone has failed due to a protocol error.
+The error is shown in the log message.
+</p></dd><dt><a name="XFRIN_AXFR_TRANSFER_STARTED"></a><span class="term">XFRIN_AXFR_TRANSFER_STARTED AXFR transfer of zone %1 started</span></dt><dd><p>
+A connection to the master server has been made, the serial value in
+the SOA record has been checked, and a zone transfer has been started.
+</p></dd><dt><a name="XFRIN_AXFR_TRANSFER_SUCCESS"></a><span class="term">XFRIN_AXFR_TRANSFER_SUCCESS AXFR transfer of zone %1 succeeded</span></dt><dd><p>
+The AXFR transfer of the given zone was successfully completed.
+</p></dd><dt><a name="XFRIN_BAD_MASTER_ADDR_FORMAT"></a><span class="term">XFRIN_BAD_MASTER_ADDR_FORMAT bad format for master address: %1</span></dt><dd><p>
+The given master address is not a valid IP address.
+</p></dd><dt><a name="XFRIN_BAD_MASTER_PORT_FORMAT"></a><span class="term">XFRIN_BAD_MASTER_PORT_FORMAT bad format for master port: %1</span></dt><dd><p>
+The master port as read from the configuration is not a valid port number.
+</p></dd><dt><a name="XFRIN_BAD_TSIG_KEY_STRING"></a><span class="term">XFRIN_BAD_TSIG_KEY_STRING bad TSIG key string: %1</span></dt><dd><p>
+The TSIG key string as read from the configuration does not represent
+a valid TSIG key.
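+</p><p>
+As a hint when diagnosing this error, a TSIG key string is generally expected
+to take the form <name>:<base64 secret>[:<algorithm>]; the exact syntax and
+the set of supported algorithms should be checked against the BIND 10 guide.
+A hypothetical example:
+</p><pre>
+example.key.:c2VjcmV0S2V5Cg==:hmac-sha256
+</pre><p>
+A malformed name, a secret that is not valid base64, or an unknown algorithm
+name would all cause the key string to be rejected with this message.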
+</p></dd><dt><a name="XFRIN_BAD_ZONE_CLASS"></a><span class="term">XFRIN_BAD_ZONE_CLASS Invalid zone class: %1</span></dt><dd><p>
+The zone class as read from the configuration is not a valid DNS class.
+</p></dd><dt><a name="XFRIN_CC_SESSION_ERROR"></a><span class="term">XFRIN_CC_SESSION_ERROR error reading from cc channel: %1</span></dt><dd><p>
+There was a problem reading from the command and control channel. The
+most likely cause is that the msgq daemon is not running.
+</p></dd><dt><a name="XFRIN_COMMAND_ERROR"></a><span class="term">XFRIN_COMMAND_ERROR error while executing command '%1': %2</span></dt><dd><p>
+There was an error while the given command was being processed. The
+error is given in the log message.
+</p></dd><dt><a name="XFRIN_CONNECT_MASTER"></a><span class="term">XFRIN_CONNECT_MASTER error connecting to master at %1: %2</span></dt><dd><p>
+There was an error opening a connection to the master. The error is
+shown in the log message.
+</p></dd><dt><a name="XFRIN_IMPORT_DNS"></a><span class="term">XFRIN_IMPORT_DNS error importing python DNS module: %1</span></dt><dd><p>
+There was an error importing the python DNS module pydnspp. The most
+likely cause is a PYTHONPATH problem.
+</p></dd><dt><a name="XFRIN_MSGQ_SEND_ERROR"></a><span class="term">XFRIN_MSGQ_SEND_ERROR error while contacting %1 and %2</span></dt><dd><p>
+There was a problem sending a message to the xfrout module or the
+zone manager. This most likely means that the msgq daemon has quit or
+was killed.
+</p></dd><dt><a name="XFRIN_MSGQ_SEND_ERROR_ZONE_MANAGER"></a><span class="term">XFRIN_MSGQ_SEND_ERROR_ZONE_MANAGER error while contacting %1</span></dt><dd><p>
+There was a problem sending a message to the zone manager. This most
+likely means that the msgq daemon has quit or was killed.
+</p></dd><dt><a name="XFRIN_RETRANSFER_UNKNOWN_ZONE"></a><span class="term">XFRIN_RETRANSFER_UNKNOWN_ZONE got notification to retransfer unknown zone %1</span></dt><dd><p>
+There was an internal command to retransfer the given zone, but the
+zone is not known to the system. This may indicate that the configuration
+for xfrin is incomplete, or there was a typographical error in the
+zone name in the configuration.
+</p></dd><dt><a name="XFRIN_STARTING"></a><span class="term">XFRIN_STARTING starting resolver with command line '%1'</span></dt><dd><p>
+An informational message, this is output when the xfrin daemon starts up.
+</p></dd><dt><a name="XFRIN_STOPPED_BY_KEYBOARD"></a><span class="term">XFRIN_STOPPED_BY_KEYBOARD keyboard interrupt, shutting down</span></dt><dd><p>
+There was a keyboard interrupt signal to stop the xfrin daemon. The
+daemon will now shut down.
+</p></dd><dt><a name="XFRIN_UNKNOWN_ERROR"></a><span class="term">XFRIN_UNKNOWN_ERROR unknown error: %1</span></dt><dd><p>
+An uncaught exception was raised while running the xfrin daemon. The
+exception message is printed in the log message.
+</p></dd><dt><a name="XFROUT_AXFR_TRANSFER_DONE"></a><span class="term">XFROUT_AXFR_TRANSFER_DONE transfer of %1/%2 complete</span></dt><dd><p>
+The transfer of the given zone has been completed successfully, or was
+aborted due to a shutdown event.
+</p></dd><dt><a name="XFROUT_AXFR_TRANSFER_ERROR"></a><span class="term">XFROUT_AXFR_TRANSFER_ERROR error transferring zone %1/%2: %3</span></dt><dd><p>
+An uncaught exception was encountered while sending the response to
+an AXFR query. The error message of the exception is included in the
+log message, but this error most likely points to incomplete exception
+handling in the code.
+</p></dd><dt><a name="XFROUT_AXFR_TRANSFER_FAILED"></a><span class="term">XFROUT_AXFR_TRANSFER_FAILED transfer of %1/%2 failed, rcode: %3</span></dt><dd><p>
+A transfer out for the given zone failed. An error response is sent
+to the client. The given rcode is the rcode that is set in the error
+response. This is either NOTAUTH (we are not authoritative for the
+zone), SERVFAIL (our internal database is missing the SOA record for
+the zone), or REFUSED (the limit of simultaneous outgoing AXFR
+transfers, as specified by the configuration value
+Xfrout/max_transfers_out, has been reached).
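+</p><p>
+If the REFUSED case is hit because of legitimately concurrent transfers rather
+than a misbehaving client, the limit can be raised through bindctl. This is
+only a sketch with an arbitrary example value:
+</p><pre>
+> config set Xfrout/max_transfers_out 20
+> config commit
+</pre><p>
+NOTAUTH and SERVFAIL, on the other hand, point at the zone configuration or
+the contents of the data source rather than at this limit.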
+</p></dd><dt><a name="XFROUT_AXFR_TRANSFER_STARTED"></a><span class="term">XFROUT_AXFR_TRANSFER_STARTED transfer of zone %1/%2 has started</span></dt><dd><p>
+A transfer out of the given zone has started.
+</p></dd><dt><a name="XFROUT_BAD_TSIG_KEY_STRING"></a><span class="term">XFROUT_BAD_TSIG_KEY_STRING bad TSIG key string: %1</span></dt><dd><p>
+The TSIG key string as read from the configuration does not represent
+a valid TSIG key.
+</p></dd><dt><a name="XFROUT_CC_SESSION_ERROR"></a><span class="term">XFROUT_CC_SESSION_ERROR error reading from cc channel: %1</span></dt><dd><p>
+There was a problem reading from the command and control channel. The
+most likely cause is that the msgq daemon is not running.
+</p></dd><dt><a name="XFROUT_CC_SESSION_TIMEOUT_ERROR"></a><span class="term">XFROUT_CC_SESSION_TIMEOUT_ERROR timeout waiting for cc response</span></dt><dd><p>
+There was a problem reading a response from another module over the
+command and control channel. The most likely cause is that the
+configuration manager b10-cfgmgr is not running.
+</p></dd><dt><a name="XFROUT_FETCH_REQUEST_ERROR"></a><span class="term">XFROUT_FETCH_REQUEST_ERROR socket error while fetching a request from the auth daemon</span></dt><dd><p>
+There was a socket error while contacting the b10-auth daemon to
+fetch a transfer request. The auth daemon may have shut down.
+</p></dd><dt><a name="XFROUT_HANDLE_QUERY_ERROR"></a><span class="term">XFROUT_HANDLE_QUERY_ERROR error while handling query: %1</span></dt><dd><p>
+There was a general error handling an xfrout query. The error is shown
+in the message. In principle this error should not appear, and points
+to an oversight in catching exceptions in the right place. However, to
+ensure the daemon keeps running, this error is caught and reported.
+</p></dd><dt><a name="XFROUT_IMPORT"></a><span class="term">XFROUT_IMPORT error importing python module: %1</span></dt><dd><p>
+There was an error importing a python module. One of the modules needed
+by xfrout could not be found. This suggests that either some libraries
+are missing on the system, or the PYTHONPATH variable is not correct.
+The specific place where this library needs to be depends on your
+system and your specific installation.
+</p></dd><dt><a name="XFROUT_NEW_CONFIG"></a><span class="term">XFROUT_NEW_CONFIG Update xfrout configuration</span></dt><dd><p>
+New configuration settings have been sent from the configuration
+manager. The xfrout daemon will now apply them.
+</p></dd><dt><a name="XFROUT_NEW_CONFIG_DONE"></a><span class="term">XFROUT_NEW_CONFIG_DONE Update xfrout configuration done</span></dt><dd><p>
+The xfrout daemon is now done reading the new configuration settings
+received from the configuration manager.
+</p></dd><dt><a name="XFROUT_NOTIFY_COMMAND"></a><span class="term">XFROUT_NOTIFY_COMMAND received command to send notifies for %1/%2</span></dt><dd><p>
+The xfrout daemon received a command on the command channel that
+NOTIFY packets should be sent for the given zone.
+</p></dd><dt><a name="XFROUT_PARSE_QUERY_ERROR"></a><span class="term">XFROUT_PARSE_QUERY_ERROR error parsing query: %1</span></dt><dd><p>
+There was a parse error while reading an incoming query. The parse
+error is shown in the log message. A remote client sent a packet we
+do not understand or support. The xfrout request will be ignored.
+In general, this should only occur for unexpected problems like
+memory allocation failures, as the query should already have been
+parsed by the b10-auth daemon, before it was passed here.
+</p></dd><dt><a name="XFROUT_PROCESS_REQUEST_ERROR"></a><span class="term">XFROUT_PROCESS_REQUEST_ERROR error processing transfer request: %2</span></dt><dd><p>
+There was an error processing a transfer request. The error is included
+in the log message, but at this point no specific information other
+than that could be given. This points to incomplete exception handling
+in the code.
+</p></dd><dt><a name="XFROUT_RECEIVED_SHUTDOWN_COMMAND"></a><span class="term">XFROUT_RECEIVED_SHUTDOWN_COMMAND shutdown command received</span></dt><dd><p>
+The xfrout daemon received a shutdown command from the command channel
+and will now shut down.
+</p></dd><dt><a name="XFROUT_RECEIVE_FILE_DESCRIPTOR_ERROR"></a><span class="term">XFROUT_RECEIVE_FILE_DESCRIPTOR_ERROR error receiving the file descriptor for an XFR connection</span></dt><dd><p>
+There was an error receiving the file descriptor for the transfer
+request. Normally, the request is received by b10-auth, and passed on
+to the xfrout daemon, so it can answer directly. However, there was a
+problem receiving this file descriptor. The request will be ignored.
+</p></dd><dt><a name="XFROUT_REMOVE_OLD_UNIX_SOCKET_FILE_ERROR"></a><span class="term">XFROUT_REMOVE_OLD_UNIX_SOCKET_FILE_ERROR error removing unix socket file %1: %2</span></dt><dd><p>
+The unix socket file xfrout needs for contact with the auth daemon
+already exists, and needs to be removed first, but there is a problem
+removing it. It is likely that we do not have permission to remove
+this file. The specific error is shown in the log message. The xfrout
+daemon will shut down.
+</p></dd><dt><a name="XFROUT_REMOVE_UNIX_SOCKET_FILE_ERROR"></a><span class="term">XFROUT_REMOVE_UNIX_SOCKET_FILE_ERROR error clearing unix socket file %1: %2</span></dt><dd><p>
+When shutting down, the xfrout daemon tried to clear the unix socket
+file used for communication with the auth daemon. It failed to remove
+the file. The reason for the failure is given in the error message.
+</p></dd><dt><a name="XFROUT_SOCKET_SELECT_ERROR"></a><span class="term">XFROUT_SOCKET_SELECT_ERROR error while calling select() on request socket: %1</span></dt><dd><p>
+There was an error while calling select() on the socket that informs
+the xfrout daemon that a new xfrout request has arrived. This should
+be a result of a rare local error such as a memory allocation failure and
+shouldn't happen under normal conditions. The error is included in the
+log message.
+</p></dd><dt><a name="XFROUT_STOPPED_BY_KEYBOARD"></a><span class="term">XFROUT_STOPPED_BY_KEYBOARD keyboard interrupt, shutting down</span></dt><dd><p>
+There was a keyboard interrupt signal to stop the xfrout daemon. The
+daemon will now shut down.
+</p></dd><dt><a name="XFROUT_STOPPING"></a><span class="term">XFROUT_STOPPING the xfrout daemon is shutting down</span></dt><dd><p>
+The current transfer is aborted, as the xfrout daemon is shutting down.
+</p></dd><dt><a name="XFROUT_UNIX_SOCKET_FILE_IN_USE"></a><span class="term">XFROUT_UNIX_SOCKET_FILE_IN_USE another xfrout process seems to be using the unix socket file %1</span></dt><dd><p>
+While starting up, the xfrout daemon tried to clear the unix domain
+socket needed for contacting the b10-auth daemon to pass requests
+on, but the file is in use. The most likely cause is that another
+xfrout daemon process is still running. This xfrout daemon (the one
+printing this message) will not start.
</p></dd></dl></div><p>
</p></div></div></body></html>
diff --git a/doc/guide/bind10-messages.xml b/doc/guide/bind10-messages.xml
index eaa8bb9..d146a9c 100644
--- a/doc/guide/bind10-messages.xml
+++ b/doc/guide/bind10-messages.xml
@@ -5,6 +5,12 @@
<!ENTITY % version SYSTEM "version.ent">
%version;
]>
+<!--
+ This XML document is generated using the system_messages.py tool
+ based on the .mes message files.
+
+ Do not edit this file.
+-->
<book>
<?xml-stylesheet href="bind10-guide.css" type="text/css"?>
@@ -62,16 +68,16 @@
<para>
<variablelist>
-<varlistentry id="ASIODNS_FETCHCOMP">
-<term>ASIODNS_FETCHCOMP upstream fetch to %1(%2) has now completed</term>
+<varlistentry id="ASIODNS_FETCH_COMPLETED">
+<term>ASIODNS_FETCH_COMPLETED upstream fetch to %1(%2) has now completed</term>
<listitem><para>
-A debug message, this records the the upstream fetch (a query made by the
+A debug message, this records that the upstream fetch (a query made by the
resolver on behalf of its client) to the specified address has completed.
</para></listitem>
</varlistentry>
-<varlistentry id="ASIODNS_FETCHSTOP">
-<term>ASIODNS_FETCHSTOP upstream fetch to %1(%2) has been stopped</term>
+<varlistentry id="ASIODNS_FETCH_STOPPED">
+<term>ASIODNS_FETCH_STOPPED upstream fetch to %1(%2) has been stopped</term>
<listitem><para>
An external component has requested the halting of an upstream fetch. This
is an allowed operation, and the message should only appear if debug is
@@ -79,27 +85,27 @@ enabled.
</para></listitem>
</varlistentry>
-<varlistentry id="ASIODNS_OPENSOCK">
-<term>ASIODNS_OPENSOCK error %1 opening %2 socket to %3(%4)</term>
+<varlistentry id="ASIODNS_OPEN_SOCKET">
+<term>ASIODNS_OPEN_SOCKET error %1 opening %2 socket to %3(%4)</term>
<listitem><para>
The asynchronous I/O code encountered an error when trying to open a socket
of the specified protocol in order to send a message to the target address.
-The the number of the system error that cause the problem is given in the
+The number of the system error that caused the problem is given in the
message.
</para></listitem>
</varlistentry>
-<varlistentry id="ASIODNS_RECVSOCK">
-<term>ASIODNS_RECVSOCK error %1 reading %2 data from %3(%4)</term>
+<varlistentry id="ASIODNS_READ_DATA">
+<term>ASIODNS_READ_DATA error %1 reading %2 data from %3(%4)</term>
<listitem><para>
-The asynchronous I/O code encountered an error when trying read data from
-the specified address on the given protocol. The the number of the system
+The asynchronous I/O code encountered an error when trying to read data from
+the specified address on the given protocol. The number of the system
error that caused the problem is given in the message.
</para></listitem>
</varlistentry>
-<varlistentry id="ASIODNS_RECVTMO">
-<term>ASIODNS_RECVTMO receive timeout while waiting for data from %1(%2)</term>
+<varlistentry id="ASIODNS_READ_TIMEOUT">
+<term>ASIODNS_READ_TIMEOUT receive timeout while waiting for data from %1(%2)</term>
<listitem><para>
An upstream fetch from the specified address timed out. This may happen for
any number of reasons and is most probably a problem at the remote server
@@ -108,8 +114,8 @@ enabled.
</para></listitem>
</varlistentry>
-<varlistentry id="ASIODNS_SENDSOCK">
-<term>ASIODNS_SENDSOCK error %1 sending data using %2 to %3(%4)</term>
+<varlistentry id="ASIODNS_SEND_DATA">
+<term>ASIODNS_SEND_DATA error %1 sending data using %2 to %3(%4)</term>
<listitem><para>
The asynchronous I/O code encountered an error when trying to send data to
the specified address on the given protocol. The number of the system
@@ -117,20 +123,674 @@ error that cause the problem is given in the message.
</para></listitem>
</varlistentry>
-<varlistentry id="ASIODNS_UNKORIGIN">
-<term>ASIODNS_UNKORIGIN unknown origin for ASIO error code %1 (protocol: %2, address %3)</term>
+<varlistentry id="ASIODNS_UNKNOWN_ORIGIN">
+<term>ASIODNS_UNKNOWN_ORIGIN unknown origin for ASIO error code %1 (protocol: %2, address %3)</term>
+<listitem><para>
+An internal consistency check on the origin of a message from the
+asynchronous I/O module failed. This may indicate an internal error;
+please submit a bug report.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ASIODNS_UNKNOWN_RESULT">
+<term>ASIODNS_UNKNOWN_RESULT unknown result (%1) when IOFetch::stop() was executed for I/O to %2(%3)</term>
+<listitem><para>
+An internal error indicating that the termination method of the resolver's
+upstream fetch class was called with an unknown result code (which is
+given in the message). Please submit a bug report.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_AXFR_ERROR">
+<term>AUTH_AXFR_ERROR error handling AXFR request: %1</term>
+<listitem><para>
+This is a debug message produced by the authoritative server when it
+has encountered an error processing an AXFR request. The message gives
+the reason for the error, and the server will return a SERVFAIL code to
+the sender.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_AXFR_UDP">
+<term>AUTH_AXFR_UDP AXFR query received over UDP</term>
+<listitem><para>
+This is a debug message output when the authoritative server has received
+an AXFR query over UDP. Use of UDP for AXFRs is not permitted by the
+protocol, so the server will return a FORMERR error to the sender.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_COMMAND_FAILED">
+<term>AUTH_COMMAND_FAILED execution of command channel instruction '%1' failed: %2</term>
+<listitem><para>
+Execution of the specified command by the authoritative server failed. The
+message contains the reason for the failure.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_CONFIG_CHANNEL_CREATED">
+<term>AUTH_CONFIG_CHANNEL_CREATED configuration session channel created</term>
+<listitem><para>
+This is a debug message indicating that the authoritative server has created
+the channel to the configuration manager. It is issued during server
+startup and is an indication that the initialization is proceeding normally.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_CONFIG_CHANNEL_ESTABLISHED">
+<term>AUTH_CONFIG_CHANNEL_ESTABLISHED configuration session channel established</term>
+<listitem><para>
+This is a debug message indicating that the authoritative server
+has established communication with the configuration manager over the
+previously-created channel. It is issued during server startup and is an
+indication that the initialization is proceeding normally.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_CONFIG_CHANNEL_STARTED">
+<term>AUTH_CONFIG_CHANNEL_STARTED configuration session channel started</term>
+<listitem><para>
+This is a debug message, issued when the authoritative server has
+posted a request to be notified when new configuration information is
+available. It is issued during server startup and is an indication that
+the initialization is proceeding normally.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_CONFIG_LOAD_FAIL">
+<term>AUTH_CONFIG_LOAD_FAIL load of configuration failed: %1</term>
+<listitem><para>
+An attempt to configure the server with information from the configuration
+database during the startup sequence has failed. (The reason for
+the failure is given in the message.) The server will continue its
+initialization although it may not be configured in the desired way.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_CONFIG_UPDATE_FAIL">
+<term>AUTH_CONFIG_UPDATE_FAIL update of configuration failed: %1</term>
+<listitem><para>
+An attempt to update the configuration of the server with information
+from the configuration database has failed; the reason is given in
+the message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_DATA_SOURCE">
+<term>AUTH_DATA_SOURCE data source database file: %1</term>
+<listitem><para>
+This is a debug message produced by the authoritative server when it accesses a
+database data source, listing the file that is being accessed.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_DNS_SERVICES_CREATED">
+<term>AUTH_DNS_SERVICES_CREATED DNS services created</term>
+<listitem><para>
+This is a debug message indicating that the component that will handle
+incoming queries for the authoritative server (DNSServices) has been
+successfully created. It is issued during server startup and is an indication
+that the initialization is proceeding normally.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_HEADER_PARSE_FAIL">
+<term>AUTH_HEADER_PARSE_FAIL unable to parse header in received DNS packet: %1</term>
+<listitem><para>
+This is a debug message, generated by the authoritative server when an
+attempt to parse the header of a received DNS packet has failed. (The
+reason for the failure is given in the message.) The server will drop the
+packet.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_LOAD_TSIG">
+<term>AUTH_LOAD_TSIG loading TSIG keys</term>
+<listitem><para>
+This is a debug message indicating that the authoritative server
+has requested the keyring holding TSIG keys from the configuration
+database. It is issued during server startup and is an indication that the
+initialization is proceeding normally.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_LOAD_ZONE">
+<term>AUTH_LOAD_ZONE loaded zone %1/%2</term>
+<listitem><para>
+This debug message is issued during the processing of the 'loadzone' command
+when the authoritative server has successfully loaded the named zone of the
+named class.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_MEM_DATASRC_DISABLED">
+<term>AUTH_MEM_DATASRC_DISABLED memory data source is disabled for class %1</term>
+<listitem><para>
+This is a debug message reporting that the authoritative server has
+discovered that the memory data source is disabled for the given class.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_MEM_DATASRC_ENABLED">
+<term>AUTH_MEM_DATASRC_ENABLED memory data source is enabled for class %1</term>
+<listitem><para>
+This is a debug message reporting that the authoritative server has
+discovered that the memory data source is enabled for the given class.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_NOTIFY_QUESTIONS">
+<term>AUTH_NOTIFY_QUESTIONS invalid number of questions (%1) in incoming NOTIFY</term>
+<listitem><para>
+This debug message is logged by the authoritative server when it receives
+a NOTIFY packet that contains zero or more than one question. (A valid
+NOTIFY packet contains one question.) The server will return a FORMERR
+error to the sender.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_NOTIFY_RRTYPE">
+<term>AUTH_NOTIFY_RRTYPE invalid question RR type (%1) in incoming NOTIFY</term>
+<listitem><para>
+This debug message is logged by the authoritative server when it receives
+a NOTIFY packet that contains an RR type other than SOA in the
+question section. (The RR type received is included in the message.) The
+server will return a FORMERR error to the sender.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_NO_STATS_SESSION">
+<term>AUTH_NO_STATS_SESSION session interface for statistics is not available</term>
+<listitem><para>
+The authoritative server had no session with the statistics module at the
+time it attempted to send it data: the attempt has been abandoned. This
+could be an error in configuration.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_NO_XFRIN">
+<term>AUTH_NO_XFRIN received NOTIFY but XFRIN session is not running</term>
+<listitem><para>
+This is a debug message produced by the authoritative server when it receives
+a NOTIFY packet but the XFRIN process is not running. The packet will be
+dropped and nothing returned to the sender.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_PACKET_PARSE_ERROR">
+<term>AUTH_PACKET_PARSE_ERROR unable to parse received DNS packet: %1</term>
+<listitem><para>
+This is a debug message, generated by the authoritative server when an
+attempt to parse a received DNS packet has failed due to something other
+than a protocol error. The reason for the failure is given in the message;
+the server will return a SERVFAIL error code to the sender.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_PACKET_PROTOCOL_ERROR">
+<term>AUTH_PACKET_PROTOCOL_ERROR DNS packet protocol error: %1. Returning %2</term>
+<listitem><para>
+This is a debug message, generated by the authoritative server when an
+attempt to parse a received DNS packet has failed due to a protocol error.
+The reason for the failure is given in the message, as is the error code
+that will be returned to the sender.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_PACKET_RECEIVED">
+<term>AUTH_PACKET_RECEIVED message received:\n%1</term>
+<listitem><para>
+This is a debug message output by the authoritative server when it
+receives a valid DNS packet.
+</para><para>
+Note: This message includes the packet received, rendered in the form of
+multiple lines of text. For this reason, it is suggested that this log message
+not be routed to the syslog file, where the multiple lines could confuse
+programs that expect a format of one message per line.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_PROCESS_FAIL">
+<term>AUTH_PROCESS_FAIL message processing failure: %1</term>
+<listitem><para>
+This message is generated by the authoritative server when it has
+encountered an internal error whilst processing a received packet:
+the cause of the error is included in the message.
+</para><para>
+The server will return a SERVFAIL error code to the sender of the packet.
+However, this message indicates a potential error in the server.
+Please open a bug ticket for this issue.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_RECEIVED_COMMAND">
+<term>AUTH_RECEIVED_COMMAND command '%1' received</term>
+<listitem><para>
+This is a debug message issued when the authoritative server has received
+a command on the command channel.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_RECEIVED_SENDSTATS">
+<term>AUTH_RECEIVED_SENDSTATS command 'sendstats' received</term>
+<listitem><para>
+This is a debug message issued when the authoritative server has received
+a command from the statistics module to send it data. The 'sendstats'
+command is handled differently to other commands, which is why the debug
+message associated with it has its own code.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_RESPONSE_RECEIVED">
+<term>AUTH_RESPONSE_RECEIVED received response message, ignoring</term>
+<listitem><para>
+This is a debug message; it is output if the authoritative server
+receives a DNS packet with the QR bit set, i.e. a DNS response. The
+server ignores the packet as it only responds to question packets.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_SEND_ERROR_RESPONSE">
+<term>AUTH_SEND_ERROR_RESPONSE sending an error response (%1 bytes):\n%2</term>
<listitem><para>
-This message should not appear and indicates an internal error if it does.
-Please enter a bug report.
+This is a debug message recording that the authoritative server is sending
+an error response to the originator of the query. A previous message will
+have recorded details of the failure.
+</para><para>
+Note: This message includes the packet sent, rendered in the form of
+multiple lines of text. For this reason, it is suggested that this log message
+not be routed to the syslog file, where the multiple lines could confuse
+programs that expect a format of one message per line.
</para></listitem>
</varlistentry>
-<varlistentry id="ASIODNS_UNKRESULT">
-<term>ASIODNS_UNKRESULT unknown result (%1) when IOFetch::stop() was executed for I/O to %2(%3)</term>
+<varlistentry id="AUTH_SEND_NORMAL_RESPONSE">
+<term>AUTH_SEND_NORMAL_RESPONSE sending an error response (%1 bytes):\n%2</term>
<listitem><para>
-The termination method of the resolver's upstream fetch class was called with
-an unknown result code (which is given in the message). This message should
-not appear and may indicate an internal error. Please enter a bug report.
+This is a debug message recording that the authoritative server is sending
+a response to the originator of a query.
+</para><para>
+Note: This message includes the packet sent, rendered in the form of
+multiple lines of text. For this reason, it is suggested that this log message
+not be routed to the syslog file, where the multiple lines could confuse
+programs that expect a format of one message per line.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_SERVER_CREATED">
+<term>AUTH_SERVER_CREATED server created</term>
+<listitem><para>
+An informational message indicating that the authoritative server process has
+been created and is initializing. The AUTH_SERVER_STARTED message will be
+output when initialization has successfully completed and the server starts
+accepting queries.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_SERVER_FAILED">
+<term>AUTH_SERVER_FAILED server failed: %1</term>
+<listitem><para>
+The authoritative server has encountered a fatal error and is terminating. The
+reason for the failure is included in the message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_SERVER_STARTED">
+<term>AUTH_SERVER_STARTED server started</term>
+<listitem><para>
+Initialization of the authoritative server has completed successfully
+and it is entering the main loop, waiting for queries to arrive.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_SQLITE3">
+<term>AUTH_SQLITE3 nothing to do for loading sqlite3</term>
+<listitem><para>
+This is a debug message indicating that the authoritative server has
+found that the data source it is loading is an SQLite3 data source,
+so no further validation is needed.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_STATS_CHANNEL_CREATED">
+<term>AUTH_STATS_CHANNEL_CREATED STATS session channel created</term>
+<listitem><para>
+This is a debug message indicating that the authoritative server has
+created a channel to the statistics process. It is issued during server
+startup and is an indication that the initialization is proceeding normally.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_STATS_CHANNEL_ESTABLISHED">
+<term>AUTH_STATS_CHANNEL_ESTABLISHED STATS session channel established</term>
+<listitem><para>
+This is a debug message indicating that the authoritative server
+has established communication over the previously created statistics
+channel. It is issued during server startup and is an indication that the
+initialization is proceeding normally.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_STATS_COMMS">
+<term>AUTH_STATS_COMMS communication error in sending statistics data: %1</term>
+<listitem><para>
+An error was encountered when the authoritative server tried to send data
+to the statistics daemon. The message includes additional information
+describing the reason for the failure.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_STATS_TIMEOUT">
+<term>AUTH_STATS_TIMEOUT timeout while sending statistics data: %1</term>
+<listitem><para>
+The authoritative server sent data to the statistics daemon but received
+no acknowledgement within the specified time. The message includes
+additional information describing the reason for the failure.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_STATS_TIMER_DISABLED">
+<term>AUTH_STATS_TIMER_DISABLED statistics timer has been disabled</term>
+<listitem><para>
+This is a debug message indicating that the statistics timer has been
+disabled in the authoritative server and no statistics information is
+being produced.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_STATS_TIMER_SET">
+<term>AUTH_STATS_TIMER_SET statistics timer set to %1 second(s)</term>
+<listitem><para>
+This is a debug message indicating that the statistics timer has been
+enabled and that the authoritative server will produce statistics data
+at the specified interval.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_UNSUPPORTED_OPCODE">
+<term>AUTH_UNSUPPORTED_OPCODE unsupported opcode: %1</term>
+<listitem><para>
+This is a debug message, produced when a received DNS packet being
+processed by the authoritative server has been found to contain an
+unsupported opcode. (The opcode is included in the message.) The server
+will return an error code of NOTIMPL to the sender.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_XFRIN_CHANNEL_CREATED">
+<term>AUTH_XFRIN_CHANNEL_CREATED XFRIN session channel created</term>
+<listitem><para>
+This is a debug message indicating that the authoritative server has
+created a channel to the XFRIN (Transfer-in) process. It is issued
+during server startup and is an indication that the initialization is
+proceeding normally.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_XFRIN_CHANNEL_ESTABLISHED">
+<term>AUTH_XFRIN_CHANNEL_ESTABLISHED XFRIN session channel established</term>
+<listitem><para>
+This is a debug message indicating that the authoritative server has
+established communication over the previously-created channel to the
+XFRIN (Transfer-in) process. It is issued during server startup and is an
+indication that the initialization is proceeding normally.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_ZONEMGR_COMMS">
+<term>AUTH_ZONEMGR_COMMS error communicating with zone manager: %1</term>
+<listitem><para>
+This is a debug message output during the processing of a NOTIFY request.
+An error (listed in the message) has been encountered whilst communicating
+with the zone manager. The NOTIFY request will not be honored.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_ZONEMGR_ERROR">
+<term>AUTH_ZONEMGR_ERROR received error response from zone manager: %1</term>
+<listitem><para>
+This is a debug message output during the processing of a NOTIFY
+request. The zone manager component has been informed of the request,
+but has returned an error response (which is included in the message). The
+NOTIFY request will not be honored.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CC_ASYNC_READ_FAILED">
+<term>CC_ASYNC_READ_FAILED asynchronous read failed</term>
+<listitem><para>
+This marks a low level error: we tried to read data from the message queue
+daemon asynchronously, but the ASIO library returned an error.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CC_CONN_ERROR">
+<term>CC_CONN_ERROR error connecting to message queue (%1)</term>
+<listitem><para>
+It is impossible to reach the message queue daemon for the reason given. It
+is unlikely that the program reporting this error can usefully continue
+running, as communication with the rest of BIND 10 is vital for its
+operation.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CC_DISCONNECT">
+<term>CC_DISCONNECT disconnecting from message queue daemon</term>
+<listitem><para>
+The library is disconnecting from the message queue daemon. This debug message
+indicates that the program is trying to shut down gracefully.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CC_ESTABLISH">
+<term>CC_ESTABLISH trying to establish connection with message queue daemon at %1</term>
+<listitem><para>
+This debug message indicates that the command channel library is about to
+connect to the message queue daemon, which should be listening on the UNIX-domain
+socket listed in the output.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CC_ESTABLISHED">
+<term>CC_ESTABLISHED successfully connected to message queue daemon</term>
+<listitem><para>
+This debug message indicates that the connection was successfully made; it
+should follow CC_ESTABLISH.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CC_GROUP_RECEIVE">
+<term>CC_GROUP_RECEIVE trying to receive a message</term>
+<listitem><para>
+Debug message, noting that a message is expected to come over the command
+channel.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CC_GROUP_RECEIVED">
+<term>CC_GROUP_RECEIVED message arrived ('%1', '%2')</term>
+<listitem><para>
+Debug message, noting that we successfully received a message (its envelope and
+payload listed). This follows CC_GROUP_RECEIVE, but might happen some time
+later, depending on whether we waited for it or just polled.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CC_GROUP_SEND">
+<term>CC_GROUP_SEND sending message '%1' to group '%2'</term>
+<listitem><para>
+Debug message, we're about to send a message over the command channel.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CC_INVALID_LENGTHS">
+<term>CC_INVALID_LENGTHS invalid length parameters (%1, %2)</term>
+<listitem><para>
+This happens when garbage comes over the command channel or some kind of
+confusion happens in the program. The data received from the socket makes no
+sense if we interpret it as message lengths. The first value is the total
+length of the message, the second is the length of the header. The header and
+its length field (2 bytes) are counted in the total length.
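+</para><para>
+As a purely illustrative sketch (this is not code from BIND 10 itself, and the
+4-byte size of the total-length field is an assumption not stated in this
+description), the consistency check that fails here has roughly the following
+shape:
+</para><screen>
+import struct
+
+def check_lengths(prefix):
+    # prefix: the first 6 bytes read from the socket
+    (total_len,) = struct.unpack(">I", prefix[0:4])   # total message length
+    (header_len,) = struct.unpack(">H", prefix[4:6])  # header length
+    # the header plus its 2-byte length field must fit inside the total length
+    if header_len + 2 > total_len:
+        raise ValueError("invalid lengths (%d, %d)" % (total_len, header_len))
+    return total_len, header_len
+</screen><para>
+When the two values cannot describe a well-formed message, the library reports
+this error rather than trying to interpret the remaining data.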
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CC_LENGTH_NOT_READY">
+<term>CC_LENGTH_NOT_READY length not ready</term>
+<listitem><para>
+There should be data representing the length of the message on the socket, but
+it is not there.
+is not there.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CC_NO_MESSAGE">
+<term>CC_NO_MESSAGE no message ready to be received yet</term>
+<listitem><para>
+The program polled for incoming messages, but there was no message waiting.
+This is a debug message which may happen only after CC_GROUP_RECEIVE.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CC_NO_MSGQ">
+<term>CC_NO_MSGQ unable to connect to message queue (%1)</term>
+<listitem><para>
+It isn't possible to connect to the message queue daemon, for the reason
+listed. It is unlikely any program will be able to continue without this
+communication.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CC_READ_ERROR">
+<term>CC_READ_ERROR error reading data from command channel (%1)</term>
+<listitem><para>
+A low level error happened when the library tried to read data from the
+command channel socket. The reason is listed.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CC_READ_EXCEPTION">
+<term>CC_READ_EXCEPTION error reading data from command channel (%1)</term>
+<listitem><para>
+We received an exception while trying to read data from the command
+channel socket. The reason is listed.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CC_REPLY">
+<term>CC_REPLY replying to message from '%1' with '%2'</term>
+<listitem><para>
+Debug message, noting we're sending a response to the original message
+with the given envelope.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CC_SET_TIMEOUT">
+<term>CC_SET_TIMEOUT setting timeout to %1ms</term>
+<listitem><para>
+Debug message. A timeout for which the program is willing to wait for a reply
+is being set.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CC_START_READ">
+<term>CC_START_READ starting asynchronous read</term>
+<listitem><para>
+Debug message. From now on, when a message (or command) comes, it'll wake the
+program and the library will automatically pass it over to the correct place.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CC_SUBSCRIBE">
+<term>CC_SUBSCRIBE subscribing to communication group %1</term>
+<listitem><para>
+Debug message. The program wants to receive messages addressed to this group.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CC_TIMEOUT">
+<term>CC_TIMEOUT timeout reading data from command channel</term>
+<listitem><para>
+The program waited too long for data from the command channel (usually when it
+sent a query to a different program and it didn't answer for whatever reason).
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CC_UNSUBSCRIBE">
+<term>CC_UNSUBSCRIBE unsubscribing from communication group %1</term>
+<listitem><para>
+Debug message. The program no longer wants to receive messages addressed to
+this group.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CC_WRITE_ERROR">
+<term>CC_WRITE_ERROR error writing data to command channel (%1)</term>
+<listitem><para>
+A low level error happened when the library tried to write data to the command
+channel socket.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CC_ZERO_LENGTH">
+<term>CC_ZERO_LENGTH invalid message length (0)</term>
+<listitem><para>
+The library received a message length of zero, which makes no sense, since
+all messages must contain at least the envelope.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CFGMGR_AUTOMATIC_CONFIG_DATABASE_UPDATE">
+<term>CFGMGR_AUTOMATIC_CONFIG_DATABASE_UPDATE Updating configuration database from version %1 to %2</term>
+<listitem><para>
+An older version of the configuration database has been found, from which
+there was an automatic upgrade path to the current version. These changes
+are now applied, and no action from the administrator is necessary.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CFGMGR_CC_SESSION_ERROR">
+<term>CFGMGR_CC_SESSION_ERROR Error connecting to command channel: %1</term>
+<listitem><para>
+The configuration manager daemon was unable to connect to the messaging
+system. The most likely cause is that msgq is not running.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CFGMGR_DATA_READ_ERROR">
+<term>CFGMGR_DATA_READ_ERROR error reading configuration database from disk: %1</term>
+<listitem><para>
+There was a problem reading the persistent configuration data as stored
+on disk. The file may be corrupted, or it is of a version from where
+there is no automatic upgrade path. The file needs to be repaired or
+removed. The configuration manager daemon will now shut down.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CFGMGR_IOERROR_WHILE_WRITING_CONFIGURATION">
+<term>CFGMGR_IOERROR_WHILE_WRITING_CONFIGURATION Unable to write configuration file; configuration not stored: %1</term>
+<listitem><para>
+There was an IO error from the system while the configuration manager
+was trying to write the configuration database to disk. The specific
+error is given. The most likely cause is that the directory where
+the file is stored does not exist, or is not writable. The updated
+configuration is not stored.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CFGMGR_OSERROR_WHILE_WRITING_CONFIGURATION">
+<term>CFGMGR_OSERROR_WHILE_WRITING_CONFIGURATION Unable to write configuration file; configuration not stored: %1</term>
+<listitem><para>
+There was an OS error from the system while the configuration manager
+was trying to write the configuration database to disk. The specific
+error is given. The most likely cause is that the system does not have
+write access to the configuration database file. The updated
+configuration is not stored.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CFGMGR_STOPPED_BY_KEYBOARD">
+<term>CFGMGR_STOPPED_BY_KEYBOARD keyboard interrupt, shutting down</term>
+<listitem><para>
+There was a keyboard interrupt signal to stop the cfgmgr daemon. The
+daemon will now shut down.
</para></listitem>
</varlistentry>
@@ -148,58 +808,61 @@ The message itself is ignored by this module.
<varlistentry id="CONFIG_CCSESSION_MSG_INTERNAL">
<term>CONFIG_CCSESSION_MSG_INTERNAL error handling CC session message: %1</term>
<listitem><para>
-There was an internal problem handling an incoming message on the
-command and control channel. An unexpected exception was thrown. This
-most likely points to an internal inconsistency in the module code. The
-exception message is appended to the log error, and the module will
-continue to run, but will not send back an answer.
+There was an internal problem handling an incoming message on the command
+and control channel. An unexpected exception was thrown, details of
+which are appended to the message. The module will continue to run,
+but will not send back an answer.
+</para><para>
+The most likely cause of this error is a programming error. Please raise
+a bug report.
</para></listitem>
</varlistentry>
-<varlistentry id="CONFIG_FOPEN_ERR">
-<term>CONFIG_FOPEN_ERR error opening %1: %2</term>
+<varlistentry id="CONFIG_GET_FAIL">
+<term>CONFIG_GET_FAIL error getting configuration from cfgmgr: %1</term>
<listitem><para>
-There was an error opening the given file.
+The configuration manager returned an error when this module requested
+the configuration. The full error message answer from the configuration
+manager is appended to the log error. The most likely cause is that
+the module is of a different (command specification) version than the
+running configuration manager.
</para></listitem>
</varlistentry>
<varlistentry id="CONFIG_JSON_PARSE">
<term>CONFIG_JSON_PARSE JSON parse error in %1: %2</term>
<listitem><para>
-There was a parse error in the JSON file. The given file does not appear
+There was an error parsing the JSON file. The given file does not appear
to be in valid JSON format. Please verify that the filename is correct
and that the contents are valid JSON.
</para></listitem>
</varlistentry>
-<varlistentry id="CONFIG_MANAGER_CONFIG">
-<term>CONFIG_MANAGER_CONFIG error getting configuration from cfgmgr: %1</term>
+<varlistentry id="CONFIG_MOD_SPEC_FORMAT">
+<term>CONFIG_MOD_SPEC_FORMAT module specification error in %1: %2</term>
<listitem><para>
-The configuration manager returned an error when this module requested
-the configuration. The full error message answer from the configuration
-manager is appended to the log error. The most likely cause is that
-the module is of a different (command specification) version than the
-running configuration manager.
+The given file does not appear to be a valid specification file: details
+are included in the message. Please verify that the filename is correct
+and that its contents are a valid BIND10 module specification.
</para></listitem>
</varlistentry>
-<varlistentry id="CONFIG_MANAGER_MOD_SPEC">
-<term>CONFIG_MANAGER_MOD_SPEC module specification not accepted by cfgmgr: %1</term>
+<varlistentry id="CONFIG_MOD_SPEC_REJECT">
+<term>CONFIG_MOD_SPEC_REJECT module specification rejected by cfgmgr: %1</term>
<listitem><para>
-The module specification file for this module was rejected by the
-configuration manager. The full error message answer from the
-configuration manager is appended to the log error. The most likely
-cause is that the module is of a different (specification file) version
-than the running configuration manager.
+The specification file for this module was rejected by the configuration
+manager. The full error message answer from the configuration manager is
+appended to the log error. The most likely cause is that the module is of
+a different (specification file) version than the running configuration
+manager.
</para></listitem>
</varlistentry>
-<varlistentry id="CONFIG_MODULE_SPEC">
-<term>CONFIG_MODULE_SPEC module specification error in %1: %2</term>
+<varlistentry id="CONFIG_OPEN_FAIL">
+<term>CONFIG_OPEN_FAIL error opening %1: %2</term>
<listitem><para>
-The given file does not appear to be a valid specification file. Please
-verify that the filename is correct and that its contents are a valid
-BIND10 module specification.
+There was an error opening the given file. The reason for the failure
+is included in the message.
</para></listitem>
</varlistentry>
@@ -349,7 +1012,7 @@ returning the CNAME instead.
<term>DATASRC_MEM_CNAME_COEXIST can't add data to CNAME in domain '%1'</term>
<listitem><para>
This is the same problem as in MEM_CNAME_TO_NONEMPTY, but it happened the
-other way around -- adding some outher data to CNAME.
+other way around -- adding some other data to CNAME.
</para></listitem>
</varlistentry>
@@ -401,7 +1064,7 @@ Debug information. A DNAME was found instead of the requested information.
</varlistentry>
<varlistentry id="DATASRC_MEM_DNAME_NS">
-<term>DATASRC_MEM_DNAME_NS dNAME and NS can't coexist in non-apex domain '%1'</term>
+<term>DATASRC_MEM_DNAME_NS DNAME and NS can't coexist in non-apex domain '%1'</term>
<listitem><para>
It was requested for DNAME and NS records to be put into the same domain
which is not the apex (the top of the zone). This is forbidden by RFC
@@ -544,7 +1207,7 @@ behaviour is specified by RFC 1034, section 4.3.3
</varlistentry>
<varlistentry id="DATASRC_MEM_WILDCARD_DNAME">
-<term>DATASRC_MEM_WILDCARD_DNAME dNAME record in wildcard domain '%1'</term>
+<term>DATASRC_MEM_WILDCARD_DNAME DNAME record in wildcard domain '%1'</term>
<listitem><para>
The software refuses to load DNAME records into a wildcard domain. It isn't
explicitly forbidden, but the protocol is ambiguous about how this should
@@ -554,7 +1217,7 @@ different tools.
</varlistentry>
<varlistentry id="DATASRC_MEM_WILDCARD_NS">
-<term>DATASRC_MEM_WILDCARD_NS nS record in wildcard domain '%1'</term>
+<term>DATASRC_MEM_WILDCARD_NS NS record in wildcard domain '%1'</term>
<listitem><para>
The software refuses to load NS records into a wildcard domain. It isn't
explicitly forbidden, but the protocol is ambiguous about how this should
@@ -666,7 +1329,7 @@ way down to the given domain.
</varlistentry>
<varlistentry id="DATASRC_QUERY_EMPTY_CNAME">
-<term>DATASRC_QUERY_EMPTY_CNAME cNAME at '%1' is empty</term>
+<term>DATASRC_QUERY_EMPTY_CNAME CNAME at '%1' is empty</term>
<listitem><para>
There was a CNAME and it was being followed. But it contains no records,
so there's nowhere to go. There will be no answer. This indicates a problem
@@ -905,7 +1568,7 @@ already. The code is 1 for error, 2 for not implemented.
</varlistentry>
<varlistentry id="DATASRC_QUERY_TOO_MANY_CNAMES">
-<term>DATASRC_QUERY_TOO_MANY_CNAMES cNAME chain limit exceeded at '%1'</term>
+<term>DATASRC_QUERY_TOO_MANY_CNAMES CNAME chain limit exceeded at '%1'</term>
<listitem><para>
A CNAME led to another CNAME and it led to another, and so on. After 16
CNAMEs, the software gave up. Long CNAME chains are discouraged, and this
@@ -962,14 +1625,14 @@ Debug information. The SQLite data source is closing the database file.
</varlistentry>
<varlistentry id="DATASRC_SQLITE_CREATE">
-<term>DATASRC_SQLITE_CREATE sQLite data source created</term>
+<term>DATASRC_SQLITE_CREATE SQLite data source created</term>
<listitem><para>
Debug information. An instance of SQLite data source is being created.
</para></listitem>
</varlistentry>
<varlistentry id="DATASRC_SQLITE_DESTROY">
-<term>DATASRC_SQLITE_DESTROY sQLite data source destroyed</term>
+<term>DATASRC_SQLITE_DESTROY SQLite data source destroyed</term>
<listitem><para>
Debug information. An instance of SQLite data source is being destroyed.
</para></listitem>
@@ -978,7 +1641,7 @@ Debug information. An instance of SQLite data source is being destroyed.
<varlistentry id="DATASRC_SQLITE_ENCLOSURE">
<term>DATASRC_SQLITE_ENCLOSURE looking for zone containing '%1'</term>
<listitem><para>
-Debug information. The SQLite data source is trying to identify, which zone
+Debug information. The SQLite data source is trying to identify which zone
should hold this domain.
</para></listitem>
</varlistentry>
@@ -986,7 +1649,7 @@ should hold this domain.
<varlistentry id="DATASRC_SQLITE_ENCLOSURE_NOTFOUND">
<term>DATASRC_SQLITE_ENCLOSURE_NOTFOUND no zone contains it</term>
<listitem><para>
-Debug information. The last SQLITE_ENCLOSURE query was unsuccessful, there's
+Debug information. The last SQLITE_ENCLOSURE query was unsuccessful; there's
no such zone in our data.
</para></listitem>
</varlistentry>
@@ -1050,7 +1713,7 @@ a referral and where it goes.
<varlistentry id="DATASRC_SQLITE_FINDREF_BAD_CLASS">
<term>DATASRC_SQLITE_FINDREF_BAD_CLASS class mismatch looking for referral ('%1' and '%2')</term>
<listitem><para>
-The SQLite data source was trying to identify, if there's a referral. But
+The SQLite data source was trying to identify if there's a referral. But
it contains a different class than the query was for.
</para></listitem>
</varlistentry>
@@ -1143,294 +1806,325 @@ generated.
</para></listitem>
</varlistentry>
-<varlistentry id="LOGIMPL_ABOVEDBGMAX">
-<term>LOGIMPL_ABOVEDBGMAX debug level of %1 is too high and will be set to the maximum of %2</term>
+<varlistentry id="LOGIMPL_ABOVE_MAX_DEBUG">
+<term>LOGIMPL_ABOVE_MAX_DEBUG debug level of %1 is too high and will be set to the maximum of %2</term>
<listitem><para>
-A message from the underlying logger implementation code, the debug level
-(as set by the string DEBGUGn) is above the maximum allowed value and has
-been reduced to that value.
+A message from the interface to the underlying logger implementation reporting
+that the debug level (as set by an internally-created string DEBUGn, where n
+is an integer, e.g. DEBUG22) is above the maximum allowed value and has
+been reduced to that value. The appearance of this message may indicate
+a programming error - please submit a bug report.
</para></listitem>
</varlistentry>
-<varlistentry id="LOGIMPL_BADDEBUG">
-<term>LOGIMPL_BADDEBUG debug string is '%1': must be of the form DEBUGn</term>
+<varlistentry id="LOGIMPL_BAD_DEBUG_STRING">
+<term>LOGIMPL_BAD_DEBUG_STRING debug string '%1' has invalid format</term>
<listitem><para>
-The string indicating the extended logging level (used by the underlying
-logger implementation code) is not of the stated form. In particular,
-it starts DEBUG but does not end with an integer.
+A message from the interface to the underlying logger implementation
+reporting that an internally-created string used to set the debug level
+is not of the correct format (it should be of the form DEBUGn, where n
+is an integer, e.g. DEBUG22). The appearance of this message indicates
+a programming error - please submit a bug report.
</para></listitem>
</varlistentry>
-<varlistentry id="LOGIMPL_BELOWDBGMIN">
-<term>LOGIMPL_BELOWDBGMIN debug level of %1 is too low and will be set to the minimum of %2</term>
+<varlistentry id="LOGIMPL_BELOW_MIN_DEBUG">
+<term>LOGIMPL_BELOW_MIN_DEBUG debug level of %1 is too low and will be set to the minimum of %2</term>
<listitem><para>
-A message from the underlying logger implementation code, the debug level
-(as set by the string DEBGUGn) is below the minimum allowed value and has
-been increased to that value.
+A message from the interface to the underlying logger implementation reporting
+that the debug level (as set by an internally-created string DEBUGn, where n
+is an integer, e.g. DEBUG22) is below the minimum allowed value and has
+been increased to that value. The appearance of this message may indicate
+a programming error - please submit a bug report.
</para></listitem>
</varlistentry>
-<varlistentry id="MSG_BADDESTINATION">
-<term>MSG_BADDESTINATION unrecognized log destination: %1</term>
+<varlistentry id="LOG_BAD_DESTINATION">
+<term>LOG_BAD_DESTINATION unrecognized log destination: %1</term>
<listitem><para>
A logger destination value was given that was not recognized. The
destination should be one of "console", "file", or "syslog".
</para></listitem>
</varlistentry>
-<varlistentry id="MSG_BADSEVERITY">
-<term>MSG_BADSEVERITY unrecognized log severity: %1</term>
+<varlistentry id="LOG_BAD_SEVERITY">
+<term>LOG_BAD_SEVERITY unrecognized log severity: %1</term>
<listitem><para>
A logger severity value was given that was not recognized. The severity
should be one of "DEBUG", "INFO", "WARN", "ERROR", or "FATAL".
</para></listitem>
</varlistentry>
-<varlistentry id="MSG_BADSTREAM">
-<term>MSG_BADSTREAM bad log console output stream: %1</term>
+<varlistentry id="LOG_BAD_STREAM">
+<term>LOG_BAD_STREAM bad log console output stream: %1</term>
<listitem><para>
-A log console output stream was given that was not recognized. The
-output stream should be one of "stdout", or "stderr"
+A log console output stream was given that was not recognized. The output
+stream should be one of "stdout" or "stderr".
</para></listitem>
</varlistentry>
-<varlistentry id="MSG_DUPLNS">
-<term>MSG_DUPLNS line %1: duplicate $NAMESPACE directive found</term>
+<varlistentry id="LOG_DUPLICATE_MESSAGE_ID">
+<term>LOG_DUPLICATE_MESSAGE_ID duplicate message ID (%1) in compiled code</term>
<listitem><para>
-When reading a message file, more than one $NAMESPACE directive was found. In
-this version of the code, such a condition is regarded as an error and the
-read will be abandoned.
+During start-up, BIND10 detected that the given message identification had
+been defined multiple times in the BIND10 code.
+</para><para>
+This has no ill effects other than the possibility that an erroneous
+message may be logged. However, as it is indicative of a programming
+error, please log a bug report.
</para></listitem>
</varlistentry>
-<varlistentry id="MSG_DUPMSGID">
-<term>MSG_DUPMSGID duplicate message ID (%1) in compiled code</term>
+<varlistentry id="LOG_DUPLICATE_NAMESPACE">
+<term>LOG_DUPLICATE_NAMESPACE line %1: duplicate $NAMESPACE directive found</term>
<listitem><para>
-Indicative of a programming error, when it started up, BIND10 detected that
-the given message ID had been registered by one or more modules. (All message
-IDs should be unique throughout BIND10.) This has no impact on the operation
-of the server other that erroneous messages may be logged. (When BIND10 loads
-the message IDs (and their associated text), if a duplicate ID is found it is
-discarded. However, when the module that supplied the duplicate ID logs that
-particular message, the text supplied by the module that added the original
-ID will be output - something that may bear no relation to the condition being
-logged.
+When reading a message file, more than one $NAMESPACE directive was found.
+Such a condition is regarded as an error and the read will be abandoned.
</para></listitem>
</varlistentry>
-<varlistentry id="MSG_IDNOTFND">
-<term>MSG_IDNOTFND could not replace message text for '%1': no such message</term>
+<varlistentry id="LOG_INPUT_OPEN_FAIL">
+<term>LOG_INPUT_OPEN_FAIL unable to open message file %1 for input: %2</term>
<listitem><para>
-During start-up a local message file was read. A line with the listed
-message identification was found in the file, but the identification is not
-one contained in the compiled-in message dictionary. Either the message
-identification has been mis-spelled in the file, or the local file was used
-for an earlier version of the software and the message with that
-identification has been removed.
-</para><para>
-This message may appear a number of times in the file, once for every such
-unknown message identification.
+The program was not able to open the specified input message file for
+the reason given.
</para></listitem>
</varlistentry>
-<varlistentry id="MSG_INVMSGID">
-<term>MSG_INVMSGID line %1: invalid message identification '%2'</term>
+<varlistentry id="LOG_INVALID_MESSAGE_ID">
+<term>LOG_INVALID_MESSAGE_ID line %1: invalid message identification '%2'</term>
<listitem><para>
-The concatenation of the prefix and the message identification is used as
-a symbol in the C++ module; as such it may only contain
+An invalid message identification (ID) has been found during the read of
+a message file. Message IDs should comprise only alphanumeric characters
+and the underscore, and should not start with a digit.
</para></listitem>
</varlistentry>
-<varlistentry id="MSG_NOMSGID">
-<term>MSG_NOMSGID line %1: message definition line found without a message ID</term>
+<varlistentry id="LOG_NAMESPACE_EXTRA_ARGS">
+<term>LOG_NAMESPACE_EXTRA_ARGS line %1: $NAMESPACE directive has too many arguments</term>
<listitem><para>
-Message definition lines are lines starting with a "%". The rest of the line
-should comprise the message ID and text describing the message. This error
-indicates the message compiler found a line in the message file comprising
-just the "%" and nothing else.
+The $NAMESPACE directive in a message file takes a single argument, a
+namespace in which all the generated symbol names are placed. This error
+is generated when the compiler finds a $NAMESPACE directive with more
+than one argument.
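+</para><para>
+By way of illustration only (the namespace shown here is invented for this
+example), a correct $NAMESPACE line carries exactly one argument:
+</para><screen>
+$NAMESPACE isc::log::example
+</screen><para>
+Anything after that single argument on the same line triggers this error.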
</para></listitem>
</varlistentry>
-<varlistentry id="MSG_NOMSGTXT">
-<term>MSG_NOMSGTXT line %1: line found containing a message ID ('%2') and no text</term>
+<varlistentry id="LOG_NAMESPACE_INVALID_ARG">
+<term>LOG_NAMESPACE_INVALID_ARG line %1: $NAMESPACE directive has an invalid argument ('%2')</term>
<listitem><para>
-Message definition lines are lines starting with a "%". The rest of the line
-should comprise the message ID and text describing the message. This error
-is generated when a line is found in the message file that contains the
-leading "%" and the message identification but no text.
+The $NAMESPACE argument in a message file should be a valid C++ namespace.
+This message is output if the simple check on the syntax of the string
+carried out by the reader fails.
</para></listitem>
</varlistentry>
-<varlistentry id="MSG_NSEXTRARG">
-<term>MSG_NSEXTRARG line %1: $NAMESPACE directive has too many arguments</term>
+<varlistentry id="LOG_NAMESPACE_NO_ARGS">
+<term>LOG_NAMESPACE_NO_ARGS line %1: no arguments were given to the $NAMESPACE directive</term>
<listitem><para>
-The $NAMESPACE directive takes a single argument, a namespace in which all the
-generated symbol names are placed. This error is generated when the
-compiler finds a $NAMESPACE directive with more than one argument.
+The $NAMESPACE directive in a message file takes a single argument,
+a C++ namespace in which all the generated symbol names are placed.
+This error is generated when the compiler finds a $NAMESPACE directive
+with no arguments.
</para></listitem>
</varlistentry>
-<varlistentry id="MSG_NSINVARG">
-<term>MSG_NSINVARG line %1: $NAMESPACE directive has an invalid argument ('%2')</term>
+<varlistentry id="LOG_NO_MESSAGE_ID">
+<term>LOG_NO_MESSAGE_ID line %1: message definition line found without a message ID</term>
<listitem><para>
-The $NAMESPACE argument should be a valid C++ namespace. The reader does a
-cursory check on its validity, checking that the characters in the namespace
-are correct. The error is generated when the reader finds an invalid
-character. (Valid are alphanumeric characters, underscores and colons.)
+Within a message file, messages are defined by lines starting with a "%".
+The rest of the line should comprise the message ID and text describing
+the message. This error indicates the message compiler found a line in
+the message file comprising just the "%" and nothing else.
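+</para><para>
+For illustration (the ID and text below are invented and are not a real
+BIND10 message), a complete message definition line looks like:
+</para><screen>
+% EXAMPLE_OPEN_FAIL unable to open file %1: %2
+</screen><para>
+A line consisting of the "%" alone produces this error; one with an ID but
+no text produces LOG_NO_MESSAGE_TEXT instead.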
</para></listitem>
</varlistentry>
-<varlistentry id="MSG_NSNOARG">
-<term>MSG_NSNOARG line %1: no arguments were given to the $NAMESPACE directive</term>
+<varlistentry id="LOG_NO_MESSAGE_TEXT">
+<term>LOG_NO_MESSAGE_TEXT line %1: line found containing a message ID ('%2') and no text</term>
<listitem><para>
-The $NAMESPACE directive takes a single argument, a namespace in which all the
-generated symbol names are placed. This error is generated when the
-compiler finds a $NAMESPACE directive with no arguments.
+Within a message file, messages are defined by lines starting with a "%".
+The rest of the line should comprise the message ID and text describing
+the message. This error indicates the message compiler found a line
+in the message file comprising just the "%" and the message identification,
+but no text.
</para></listitem>
</varlistentry>
-<varlistentry id="MSG_OPENIN">
-<term>MSG_OPENIN unable to open message file %1 for input: %2</term>
+<varlistentry id="LOG_NO_SUCH_MESSAGE">
+<term>LOG_NO_SUCH_MESSAGE could not replace message text for '%1': no such message</term>
<listitem><para>
-The program was not able to open the specified input message file for the
-reason given.
+During start-up a local message file was read. A line with the listed
+message identification was found in the file, but the identification is
+not one contained in the compiled-in message dictionary. This message
+may appear a number of times in the file, once for every such unknown
+message identification.
+</para><para>
+There may be several reasons why this message may appear:
+</para><para>
+- The message ID has been mis-spelled in the local message file.
+</para><para>
+- The program outputting the message may not use that particular message
+(e.g. it originates in a module not used by the program).
+</para><para>
+- The local file was written for an earlier version of the BIND10 software
+and the later version no longer generates that message.
+</para><para>
+Whatever the reason, there is no impact on the operation of BIND10.
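+</para><para>
+As an illustration, a local message file entry that replaces the wording of
+an existing message (the replacement text here is invented) would look like:
+</para><screen>
+% RESOLVER_RECURSIVE resolver started in full recursive mode
+</screen><para>
+If the ID on such a line is not found in the compiled-in dictionary, this
+message is logged and the replacement is simply not applied.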
</para></listitem>
</varlistentry>
-<varlistentry id="MSG_OPENOUT">
-<term>MSG_OPENOUT unable to open %1 for output: %2</term>
+<varlistentry id="LOG_OPEN_OUTPUT_FAIL">
+<term>LOG_OPEN_OUTPUT_FAIL unable to open %1 for output: %2</term>
<listitem><para>
-The program was not able to open the specified output file for the reason
-given.
+Originating within the logging code, the program was not able to open
+the specified output file for the reason given.
</para></listitem>
</varlistentry>
-<varlistentry id="MSG_PRFEXTRARG">
-<term>MSG_PRFEXTRARG line %1: $PREFIX directive has too many arguments</term>
+<varlistentry id="LOG_PREFIX_EXTRA_ARGS">
+<term>LOG_PREFIX_EXTRA_ARGS line %1: $PREFIX directive has too many arguments</term>
<listitem><para>
-The $PREFIX directive takes a single argument, a prefix to be added to the
-symbol names when a C++ .h file is created. This error is generated when the
-compiler finds a $PREFIX directive with more than one argument.
+Within a message file, the $PREFIX directive takes a single argument,
+a prefix to be added to the symbol names when a C++ file is created.
+This error is generated when the compiler finds a $PREFIX directive with
+more than one argument.
+</para><para>
+Note: the $PREFIX directive is deprecated and will be removed in a future
+version of BIND10.
</para></listitem>
</varlistentry>
-<varlistentry id="MSG_PRFINVARG">
-<term>MSG_PRFINVARG line %1: $PREFIX directive has an invalid argument ('%2')</term>
+<varlistentry id="LOG_PREFIX_INVALID_ARG">
+<term>LOG_PREFIX_INVALID_ARG line %1: $PREFIX directive has an invalid argument ('%2')</term>
<listitem><para>
-The $PREFIX argument is used in a symbol name in a C++ header file. As such,
-it must adhere to restrictions on C++ symbol names (e.g. may only contain
-alphanumeric characters or underscores, and may nor start with a digit).
-A $PREFIX directive was found with an argument (given in the message) that
-violates those restictions.
+Within a message file, the $PREFIX directive takes a single argument,
+a prefix to be added to the symbol names when a C++ file is created.
+As such, it must adhere to restrictions on C++ symbol names (e.g. may
+only contain alphanumeric characters or underscores, and may not start
+with a digit). A $PREFIX directive was found with an argument (given
+in the message) that violates those restrictions.
+</para><para>
+Note: the $PREFIX directive is deprecated and will be removed in a future
+version of BIND10.
</para></listitem>
</varlistentry>
-<varlistentry id="MSG_RDLOCMES">
-<term>MSG_RDLOCMES reading local message file %1</term>
+<varlistentry id="LOG_READING_LOCAL_FILE">
+<term>LOG_READING_LOCAL_FILE reading local message file %1</term>
<listitem><para>
-This is an informational message output by BIND10 when it starts to read a
-local message file. (A local message file may replace the text of one of more
-messages; the ID of the message will not be changed though.)
+This is an informational message output by BIND10 when it starts to read
+a local message file. (A local message file may replace the text of
+one or more messages; the ID of the message will not be changed though.)
</para></listitem>
</varlistentry>
-<varlistentry id="MSG_READERR">
-<term>MSG_READERR error reading from message file %1: %2</term>
+<varlistentry id="LOG_READ_ERROR">
+<term>LOG_READ_ERROR error reading from message file %1: %2</term>
<listitem><para>
The specified error was encountered reading from the named message file.
</para></listitem>
</varlistentry>
-<varlistentry id="MSG_UNRECDIR">
-<term>MSG_UNRECDIR line %1: unrecognised directive '%2'</term>
+<varlistentry id="LOG_UNRECOGNISED_DIRECTIVE">
+<term>LOG_UNRECOGNISED_DIRECTIVE line %1: unrecognised directive '%2'</term>
<listitem><para>
-A line starting with a dollar symbol was found, but the first word on the line
-(shown in the message) was not a recognised message compiler directive.
+Within a message file, a line starting with a dollar symbol was found
+(indicating the presence of a directive) but the first word on the line
+(shown in the message) was not recognised.
</para></listitem>
</varlistentry>
-<varlistentry id="MSG_WRITERR">
-<term>MSG_WRITERR error writing to %1: %2</term>
+<varlistentry id="LOG_WRITE_ERROR">
+<term>LOG_WRITE_ERROR error writing to %1: %2</term>
<listitem><para>
-The specified error was encountered by the message compiler when writing to
-the named output file.
+The specified error was encountered by the message compiler when writing
+to the named output file.
</para></listitem>
</varlistentry>
-<varlistentry id="NSAS_INVRESPSTR">
-<term>NSAS_INVRESPSTR queried for %1 but got invalid response</term>
+<varlistentry id="NSAS_FIND_NS_ADDRESS">
+<term>NSAS_FIND_NS_ADDRESS asking resolver to obtain A and AAAA records for %1</term>
<listitem><para>
-This message indicates an internal error in the nameserver address store
-component (NSAS) of the resolver. The NSAS made a query for a RR for the
-specified nameserver but received an invalid response. Either the success
-function was called without a DNS message or the message was invalid on some
-way. (In the latter case, the error should have been picked up elsewhere in
-the processing logic, hence the raising of the error here.)
+A debug message issued when the NSAS (nameserver address store - part
+of the resolver) is making a callback into the resolver to retrieve the
+address records for the specified nameserver.
</para></listitem>
</varlistentry>
-<varlistentry id="NSAS_INVRESPTC">
-<term>NSAS_INVRESPTC queried for %1 RR of type/class %2/%3, received response %4/%5</term>
+<varlistentry id="NSAS_FOUND_ADDRESS">
+<term>NSAS_FOUND_ADDRESS found address %1 for %2</term>
<listitem><para>
-This message indicates an internal error in the nameserver address store
-component (NSAS) of the resolver. The NSAS made a query for the given RR
-type and class, but instead received an answer with the given type and class.
+A debug message issued when the NSAS (nameserver address store - part
+of the resolver) has retrieved the given address for the specified
+nameserver through an external query.
</para></listitem>
</varlistentry>
-<varlistentry id="NSAS_LOOKUPCANCEL">
-<term>NSAS_LOOKUPCANCEL lookup for zone %1 has been cancelled</term>
+<varlistentry id="NSAS_INVALID_RESPONSE">
+<term>NSAS_INVALID_RESPONSE queried for %1 but got invalid response</term>
<listitem><para>
-A debug message, this is output when a NSAS (nameserver address store -
-part of the resolver) lookup for a zone has been cancelled.
+The NSAS (nameserver address store - part of the resolver) made a query
+for a RR for the specified nameserver but received an invalid response.
+Either the success function was called without a DNS message or the
+message was invalid in some way. (In the latter case, the error should
+have been picked up elsewhere in the processing logic, hence the raising
+of the error here.)
+</para><para>
+This message indicates an internal error in the NSAS. Please raise a
+bug report.
</para></listitem>
</varlistentry>
-<varlistentry id="NSAS_LOOKUPZONE">
-<term>NSAS_LOOKUPZONE searching NSAS for nameservers for zone %1</term>
+<varlistentry id="NSAS_LOOKUP_CANCEL">
+<term>NSAS_LOOKUP_CANCEL lookup for zone %1 has been canceled</term>
<listitem><para>
-A debug message, this is output when a call is made to the nameserver address
-store (part of the resolver) to obtain the nameservers for the specified zone.
+A debug message issued when an NSAS (nameserver address store - part of
+the resolver) lookup for a zone has been canceled.
</para></listitem>
</varlistentry>
-<varlistentry id="NSAS_NSADDR">
-<term>NSAS_NSADDR asking resolver to obtain A and AAAA records for %1</term>
+<varlistentry id="NSAS_NS_LOOKUP_FAIL">
+<term>NSAS_NS_LOOKUP_FAIL failed to lookup any %1 for %2</term>
<listitem><para>
-A debug message, the NSAS (nameserver address store - part of the resolver) is
-making a callback into the resolver to retrieve the address records for the
-specified nameserver.
+A debug message issued when the NSAS (nameserver address store - part of
+the resolver) has been unable to retrieve the specified resource record
+for the specified nameserver. This is not necessarily a problem - the
+nameserver may be unreachable, in which case the NSAS will try other
+nameservers in the zone.
</para></listitem>
</varlistentry>
-<varlistentry id="NSAS_NSLKUPFAIL">
-<term>NSAS_NSLKUPFAIL failed to lookup any %1 for %2</term>
+<varlistentry id="NSAS_SEARCH_ZONE_NS">
+<term>NSAS_SEARCH_ZONE_NS searching NSAS for nameservers for zone %1</term>
<listitem><para>
-A debug message, the NSAS (nameserver address store - part of the resolver)
-has been unable to retrieve the specified resource record for the specified
-nameserver. This is not necessarily a problem - the nameserver may be
-unreachable, in which case the NSAS will try other nameservers in the zone.
+A debug message output when a call is made to the NSAS (nameserver
+address store - part of the resolver) to obtain the nameservers for
+the specified zone.
</para></listitem>
</varlistentry>
-<varlistentry id="NSAS_NSLKUPSUCC">
-<term>NSAS_NSLKUPSUCC found address %1 for %2</term>
+<varlistentry id="NSAS_UPDATE_RTT">
+<term>NSAS_UPDATE_RTT update RTT for %1: was %2 ms, is now %3 ms</term>
<listitem><para>
-A debug message, the NSAS (nameserver address store - part of the resolver)
-has retrieved the given address for the specified nameserver through an
-external query.
+An NSAS (nameserver address store - part of the resolver) debug message
+reporting the update of a round-trip time (RTT) for a query made to the
+specified nameserver. The RTT has been updated using the value given
+and the new RTT is displayed. (The RTT is subject to a calculation that
+damps out sudden changes. As a result, the new RTT used by the NSAS in
+future decisions of which nameserver to use is not necessarily equal to
+the RTT reported.)
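+</para><para>
+Purely as an illustration (the actual weighting used by the NSAS is an
+internal implementation detail and may differ), such damping is typically
+an exponentially weighted moving average of the form:
+</para><screen>
+new_rtt = (1 - w) * old_rtt + w * measured_rtt     with 0 < w < 1
+</screen><para>
+so a single unusually fast or slow response moves the stored RTT only part
+of the way towards the new measurement.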
</para></listitem>
</varlistentry>
-<varlistentry id="NSAS_SETRTT">
-<term>NSAS_SETRTT reporting RTT for %1 as %2; new value is now %3</term>
+<varlistentry id="NSAS_WRONG_ANSWER">
+<term>NSAS_WRONG_ANSWER queried for %1 RR of type/class %2/%3, received response %4/%5</term>
<listitem><para>
-A NSAS (nameserver address store - part of the resolver) debug message
-reporting the round-trip time (RTT) for a query made to the specified
-nameserver. The RTT has been updated using the value given and the new RTT is
-displayed. (The RTT is subject to a calculation that damps out sudden
-changes. As a result, the new RTT is not necessarily equal to the RTT
-reported.)
+An NSAS (nameserver address store - part of the resolver) made a query for
+a resource record of a particular type and class, but instead received
+an answer with a different type and class.
+</para><para>
+This message indicates an internal error in the NSAS. Please raise a
+bug report.
</para></listitem>
</varlistentry>
@@ -1460,16 +2154,16 @@ type> tuple in the cache; instead, the deepest delegation found is indicated.
</para></listitem>
</varlistentry>
-<varlistentry id="RESLIB_FOLLOWCNAME">
-<term>RESLIB_FOLLOWCNAME following CNAME chain to <%1></term>
+<varlistentry id="RESLIB_FOLLOW_CNAME">
+<term>RESLIB_FOLLOW_CNAME following CNAME chain to <%1></term>
<listitem><para>
A debug message, a CNAME response was received and another query is being issued
for the <name, class, type> tuple.
</para></listitem>
</varlistentry>
-<varlistentry id="RESLIB_LONGCHAIN">
-<term>RESLIB_LONGCHAIN CNAME received in response to query for <%1>: CNAME chain length exceeded</term>
+<varlistentry id="RESLIB_LONG_CHAIN">
+<term>RESLIB_LONG_CHAIN CNAME received in response to query for <%1>: CNAME chain length exceeded</term>
<listitem><para>
A debug message recording that a CNAME response has been received to an upstream
query for the specified question (Previous debug messages will have indicated
@@ -1479,26 +2173,26 @@ is where on CNAME points to another) and so an error is being returned.
</para></listitem>
</varlistentry>
-<varlistentry id="RESLIB_NONSRRSET">
-<term>RESLIB_NONSRRSET no NS RRSet in referral response received to query for <%1></term>
+<varlistentry id="RESLIB_NO_NS_RRSET">
+<term>RESLIB_NO_NS_RRSET no NS RRSet in referral response received to query for <%1></term>
<listitem><para>
A debug message, this indicates that a response was received for the specified
-query and was categorised as a referral. However, the received message did
+query and was categorized as a referral. However, the received message did
not contain any NS RRsets. This may indicate a programming error in the
response classification code.
</para></listitem>
</varlistentry>
-<varlistentry id="RESLIB_NSASLOOK">
-<term>RESLIB_NSASLOOK looking up nameserver for zone %1 in the NSAS</term>
+<varlistentry id="RESLIB_NSAS_LOOKUP">
+<term>RESLIB_NSAS_LOOKUP looking up nameserver for zone %1 in the NSAS</term>
<listitem><para>
A debug message, the RunningQuery object is querying the NSAS for the
nameservers for the specified zone.
</para></listitem>
</varlistentry>
-<varlistentry id="RESLIB_NXDOMRR">
-<term>RESLIB_NXDOMRR NXDOMAIN/NXRRSET received in response to query for <%1></term>
+<varlistentry id="RESLIB_NXDOM_NXRR">
+<term>RESLIB_NXDOM_NXRR NXDOMAIN/NXRRSET received in response to query for <%1></term>
<listitem><para>
A debug message recording that either a NXDOMAIN or an NXRRSET response has
been received to an upstream query for the specified question. Previous debug
@@ -1514,8 +2208,8 @@ are no retries left, an error will be reported.
</para></listitem>
</varlistentry>
-<varlistentry id="RESLIB_PROTOCOLRTRY">
-<term>RESLIB_PROTOCOLRTRY protocol error in answer for %1: %2 (retries left: %3)</term>
+<varlistentry id="RESLIB_PROTOCOL_RETRY">
+<term>RESLIB_PROTOCOL_RETRY protocol error in answer for %1: %2 (retries left: %3)</term>
<listitem><para>
A debug message indicating that a protocol error was received and that
the resolver is repeating the query to the same nameserver. After this
@@ -1523,33 +2217,16 @@ repeated query, there will be the indicated number of retries left.
</para></listitem>
</varlistentry>
-<varlistentry id="RESLIB_RCODERR">
-<term>RESLIB_RCODERR RCODE indicates error in response to query for <%1></term>
+<varlistentry id="RESLIB_RCODE_ERR">
+<term>RESLIB_RCODE_ERR RCODE indicates error in response to query for <%1></term>
<listitem><para>
A debug message, the response to the specified query indicated an error
that is not covered by a specific code path. A SERVFAIL will be returned.
</para></listitem>
</varlistentry>
-<varlistentry id="RESLIB_REFERRAL">
-<term>RESLIB_REFERRAL referral received in response to query for <%1></term>
-<listitem><para>
-A debug message recording that a referral response has been received to an
-upstream query for the specified question. Previous debug messages will
-have indicated the server to which the question was sent.
-</para></listitem>
-</varlistentry>
-
-<varlistentry id="RESLIB_REFERZONE">
-<term>RESLIB_REFERZONE referred to zone %1</term>
-<listitem><para>
-A debug message indicating that the last referral message was to the specified
-zone.
-</para></listitem>
-</varlistentry>
-
-<varlistentry id="RESLIB_RESCAFND">
-<term>RESLIB_RESCAFND found <%1> in the cache (resolve() instance %2)</term>
+<varlistentry id="RESLIB_RECQ_CACHE_FIND">
+<term>RESLIB_RECQ_CACHE_FIND found <%1> in the cache (resolve() instance %2)</term>
<listitem><para>
This is a debug message and indicates that a RecursiveQuery object found
the specified <name, class, type> tuple in the cache. The instance number
@@ -1558,8 +2235,8 @@ been called.
</para></listitem>
</varlistentry>
-<varlistentry id="RESLIB_RESCANOTFND">
-<term>RESLIB_RESCANOTFND did not find <%1> in the cache, starting RunningQuery (resolve() instance %2)</term>
+<varlistentry id="RESLIB_RECQ_CACHE_NO_FIND">
+<term>RESLIB_RECQ_CACHE_NO_FIND did not find <%1> in the cache, starting RunningQuery (resolve() instance %2)</term>
<listitem><para>
This is a debug message and indicates that the look in the cache made by the
RecursiveQuery::resolve() method did not find an answer, so a new RunningQuery
@@ -1569,6 +2246,23 @@ been called.
</para></listitem>
</varlistentry>
+<varlistentry id="RESLIB_REFERRAL">
+<term>RESLIB_REFERRAL referral received in response to query for <%1></term>
+<listitem><para>
+A debug message recording that a referral response has been received to an
+upstream query for the specified question. Previous debug messages will
+have indicated the server to which the question was sent.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESLIB_REFER_ZONE">
+<term>RESLIB_REFER_ZONE referred to zone %1</term>
+<listitem><para>
+A debug message indicating that the last referral message was to the specified
+zone.
+</para></listitem>
+</varlistentry>
+
<varlistentry id="RESLIB_RESOLVE">
<term>RESLIB_RESOLVE asked to resolve <%1> (resolve() instance %2)</term>
<listitem><para>
@@ -1579,8 +2273,8 @@ message indicates which of the two resolve() methods has been called.
</para></listitem>
</varlistentry>
-<varlistentry id="RESLIB_RRSETFND">
-<term>RESLIB_RRSETFND found single RRset in the cache when querying for <%1> (resolve() instance %2)</term>
+<varlistentry id="RESLIB_RRSET_FOUND">
+<term>RESLIB_RRSET_FOUND found single RRset in the cache when querying for <%1> (resolve() instance %2)</term>
<listitem><para>
A debug message, indicating that when RecursiveQuery::resolve queried the
cache, a single RRset was found which was put in the answer. The instance
@@ -1596,16 +2290,16 @@ A debug message giving the round-trip time of the last query and response.
</para></listitem>
</varlistentry>
-<varlistentry id="RESLIB_RUNCAFND">
-<term>RESLIB_RUNCAFND found <%1> in the cache</term>
+<varlistentry id="RESLIB_RUNQ_CACHE_FIND">
+<term>RESLIB_RUNQ_CACHE_FIND found <%1> in the cache</term>
<listitem><para>
This is a debug message and indicates that a RunningQuery object found
the specified <name, class, type> tuple in the cache.
</para></listitem>
</varlistentry>
-<varlistentry id="RESLIB_RUNCALOOK">
-<term>RESLIB_RUNCALOOK looking up up <%1> in the cache</term>
+<varlistentry id="RESLIB_RUNQ_CACHE_LOOKUP">
+<term>RESLIB_RUNQ_CACHE_LOOKUP looking up <%1> in the cache</term>
<listitem><para>
This is a debug message and indicates that a RunningQuery object has made
a call to its doLookup() method to look up the specified <name, class, type>
@@ -1613,16 +2307,16 @@ tuple, the first action of which will be to examine the cache.
</para></listitem>
</varlistentry>
-<varlistentry id="RESLIB_RUNQUFAIL">
-<term>RESLIB_RUNQUFAIL failure callback - nameservers are unreachable</term>
+<varlistentry id="RESLIB_RUNQ_FAIL">
+<term>RESLIB_RUNQ_FAIL failure callback - nameservers are unreachable</term>
<listitem><para>
A debug message indicating that a RunningQuery's failure callback has been
called because all nameservers for the zone in question are unreachable.
</para></listitem>
</varlistentry>
-<varlistentry id="RESLIB_RUNQUSUCC">
-<term>RESLIB_RUNQUSUCC success callback - sending query to %1</term>
+<varlistentry id="RESLIB_RUNQ_SUCCESS">
+<term>RESLIB_RUNQ_SUCCESS success callback - sending query to %1</term>
<listitem><para>
A debug message indicating that a RunningQuery's success callback has been
called because a nameserver has been found, and that a query is being sent
@@ -1630,8 +2324,8 @@ to the specified nameserver.
</para></listitem>
</varlistentry>
-<varlistentry id="RESLIB_TESTSERV">
-<term>RESLIB_TESTSERV setting test server to %1(%2)</term>
+<varlistentry id="RESLIB_TEST_SERVER">
+<term>RESLIB_TEST_SERVER setting test server to %1(%2)</term>
<listitem><para>
This is an internal debugging message and is only generated in unit tests.
It indicates that all upstream queries from the resolver are being routed to
@@ -1641,8 +2335,8 @@ operation, it is a warning message instead of a debug message.
</para></listitem>
</varlistentry>
-<varlistentry id="RESLIB_TESTUPSTR">
-<term>RESLIB_TESTUPSTR sending upstream query for <%1> to test server at %2</term>
+<varlistentry id="RESLIB_TEST_UPSTREAM">
+<term>RESLIB_TEST_UPSTREAM sending upstream query for <%1> to test server at %2</term>
<listitem><para>
This is a debug message and should only be seen in unit tests. A query for
the specified <name, class, type> tuple is being sent to a test nameserver
@@ -1658,8 +2352,8 @@ there are no retries left, an error will be reported.
</para></listitem>
</varlistentry>
-<varlistentry id="RESLIB_TIMEOUTRTRY">
-<term>RESLIB_TIMEOUTRTRY query <%1> to %2 timed out, re-trying (retries left: %3)</term>
+<varlistentry id="RESLIB_TIMEOUT_RETRY">
+<term>RESLIB_TIMEOUT_RETRY query <%1> to %2 timed out, re-trying (retries left: %3)</term>
<listitem><para>
A debug message indicating that the specified query has timed out and that
the resolver is repeating the query to the same nameserver. After this
@@ -1685,8 +2379,8 @@ tuple is being sent to a nameserver whose address is given in the message.
</para></listitem>
</varlistentry>
-<varlistentry id="RESOLVER_AXFRTCP">
-<term>RESOLVER_AXFRTCP AXFR request received over TCP</term>
+<varlistentry id="RESOLVER_AXFR_TCP">
+<term>RESOLVER_AXFR_TCP AXFR request received over TCP</term>
<listitem><para>
A debug message, the resolver received an AXFR request over TCP. The server
cannot process it and will return an error message to the sender with the
@@ -1694,8 +2388,8 @@ RCODE set to NOTIMP.
</para></listitem>
</varlistentry>
-<varlistentry id="RESOLVER_AXFRUDP">
-<term>RESOLVER_AXFRUDP AXFR request received over UDP</term>
+<varlistentry id="RESOLVER_AXFR_UDP">
+<term>RESOLVER_AXFR_UDP AXFR request received over UDP</term>
<listitem><para>
A debug message, the resolver received an AXFR request over UDP. The server
cannot process it (and in any case, an AXFR request should be sent over TCP)
@@ -1703,24 +2397,24 @@ and will return an error message to the sender with the RCODE set to FORMERR.
</para></listitem>
</varlistentry>
-<varlistentry id="RESOLVER_CLTMOSMALL">
-<term>RESOLVER_CLTMOSMALL client timeout of %1 is too small</term>
+<varlistentry id="RESOLVER_CLIENT_TIME_SMALL">
+<term>RESOLVER_CLIENT_TIME_SMALL client timeout of %1 is too small</term>
<listitem><para>
An error indicating that the configuration value specified for the client
timeout is too small.
</para></listitem>
</varlistentry>
-<varlistentry id="RESOLVER_CONFIGCHAN">
-<term>RESOLVER_CONFIGCHAN configuration channel created</term>
+<varlistentry id="RESOLVER_CONFIG_CHANNEL">
+<term>RESOLVER_CONFIG_CHANNEL configuration channel created</term>
<listitem><para>
A debug message, output when the resolver has successfully established a
connection to the configuration channel.
</para></listitem>
</varlistentry>
-<varlistentry id="RESOLVER_CONFIGERR">
-<term>RESOLVER_CONFIGERR error in configuration: %1</term>
+<varlistentry id="RESOLVER_CONFIG_ERROR">
+<term>RESOLVER_CONFIG_ERROR error in configuration: %1</term>
<listitem><para>
An error was detected in a configuration update received by the resolver. This
may be in the format of the configuration message (in which case this is a
@@ -1730,16 +2424,16 @@ will give more details.
</para></listitem>
</varlistentry>
-<varlistentry id="RESOLVER_CONFIGLOAD">
-<term>RESOLVER_CONFIGLOAD configuration loaded</term>
+<varlistentry id="RESOLVER_CONFIG_LOADED">
+<term>RESOLVER_CONFIG_LOADED configuration loaded</term>
<listitem><para>
A debug message, output when the resolver configuration has been successfully
loaded.
</para></listitem>
</varlistentry>
-<varlistentry id="RESOLVER_CONFIGUPD">
-<term>RESOLVER_CONFIGUPD configuration updated: %1</term>
+<varlistentry id="RESOLVER_CONFIG_UPDATED">
+<term>RESOLVER_CONFIG_UPDATED configuration updated: %1</term>
<listitem><para>
A debug message, the configuration has been updated with the specified
information.
@@ -1753,16 +2447,16 @@ A debug message, output when the Resolver() object has been created.
</para></listitem>
</varlistentry>
-<varlistentry id="RESOLVER_DNSMSGRCVD">
-<term>RESOLVER_DNSMSGRCVD DNS message received: %1</term>
+<varlistentry id="RESOLVER_DNS_MESSAGE_RECEIVED">
+<term>RESOLVER_DNS_MESSAGE_RECEIVED DNS message received: %1</term>
<listitem><para>
A debug message, this always precedes some other logging message and is the
formatted contents of the DNS packet that the other message refers to.
</para></listitem>
</varlistentry>
-<varlistentry id="RESOLVER_DNSMSGSENT">
-<term>RESOLVER_DNSMSGSENT DNS message of %1 bytes sent: %2</term>
+<varlistentry id="RESOLVER_DNS_MESSAGE_SENT">
+<term>RESOLVER_DNS_MESSAGE_SENT DNS message of %1 bytes sent: %2</term>
<listitem><para>
A debug message, this contains details of the response sent back to the querying
system.
@@ -1777,24 +2471,24 @@ resolver. All it can do is to shut down.
</para></listitem>
</varlistentry>
-<varlistentry id="RESOLVER_FWDADDR">
-<term>RESOLVER_FWDADDR setting forward address %1(%2)</term>
+<varlistentry id="RESOLVER_FORWARD_ADDRESS">
+<term>RESOLVER_FORWARD_ADDRESS setting forward address %1(%2)</term>
<listitem><para>
This message may appear multiple times during startup, and it lists the
forward addresses used by the resolver when running in forwarding mode.
</para></listitem>
</varlistentry>
-<varlistentry id="RESOLVER_FWDQUERY">
-<term>RESOLVER_FWDQUERY processing forward query</term>
+<varlistentry id="RESOLVER_FORWARD_QUERY">
+<term>RESOLVER_FORWARD_QUERY processing forward query</term>
<listitem><para>
The received query has passed all checks and is being forwarded to upstream
servers.
</para></listitem>
</varlistentry>
-<varlistentry id="RESOLVER_HDRERR">
-<term>RESOLVER_HDRERR message received, exception when processing header: %1</term>
+<varlistentry id="RESOLVER_HEADER_ERROR">
+<term>RESOLVER_HEADER_ERROR message received, exception when processing header: %1</term>
<listitem><para>
A debug message noting that an exception occurred during the processing of
a received packet. The packet has been dropped.
@@ -1809,49 +2503,59 @@ and will return an error message to the sender with the RCODE set to NOTIMP.
</para></listitem>
</varlistentry>
-<varlistentry id="RESOLVER_LKTMOSMALL">
-<term>RESOLVER_LKTMOSMALL lookup timeout of %1 is too small</term>
+<varlistentry id="RESOLVER_LOOKUP_TIME_SMALL">
+<term>RESOLVER_LOOKUP_TIME_SMALL lookup timeout of %1 is too small</term>
<listitem><para>
An error indicating that the configuration value specified for the lookup
timeout is too small.
</para></listitem>
</varlistentry>
-<varlistentry id="RESOLVER_NFYNOTAUTH">
-<term>RESOLVER_NFYNOTAUTH NOTIFY arrived but server is not authoritative</term>
+<varlistentry id="RESOLVER_MESSAGE_ERROR">
+<term>RESOLVER_MESSAGE_ERROR error parsing received message: %1 - returning %2</term>
<listitem><para>
-The resolver received a NOTIFY message. As the server is not authoritative it
-cannot process it, so it returns an error message to the sender with the RCODE
-set to NOTAUTH.
+A debug message noting that the resolver received a message and the
+parsing of the body of the message failed due to some error (although
+the parsing of the header succeeded). The message parameters give a
+textual description of the problem and the RCODE returned.
</para></listitem>
</varlistentry>
-<varlistentry id="RESOLVER_NORMQUERY">
-<term>RESOLVER_NORMQUERY processing normal query</term>
+<varlistentry id="RESOLVER_NEGATIVE_RETRIES">
+<term>RESOLVER_NEGATIVE_RETRIES negative number of retries (%1) specified in the configuration</term>
<listitem><para>
-The received query has passed all checks and is being processed by the resolver.
+An error message indicating that the resolver configuration has specified a
+negative retry count. Only zero or positive values are valid.
</para></listitem>
</varlistentry>
-<varlistentry id="RESOLVER_NOROOTADDR">
-<term>RESOLVER_NOROOTADDR no root addresses available</term>
+<varlistentry id="RESOLVER_NON_IN_PACKET">
+<term>RESOLVER_NON_IN_PACKET non-IN class request received, returning REFUSED message</term>
<listitem><para>
-A warning message during startup, indicates that no root addresses have been
-set. This may be because the resolver will get them from a priming query.
+A debug message, the resolver has received a DNS packet that was not IN class.
+The resolver cannot handle such packets, so is returning a REFUSED response to
+the sender.
</para></listitem>
</varlistentry>
-<varlistentry id="RESOLVER_NOTIN">
-<term>RESOLVER_NOTIN non-IN class request received, returning REFUSED message</term>
+<varlistentry id="RESOLVER_NORMAL_QUERY">
+<term>RESOLVER_NORMAL_QUERY processing normal query</term>
<listitem><para>
-A debug message, the resolver has received a DNS packet that was not IN class.
-The resolver cannot handle such packets, so is returning a REFUSED response to
-the sender.
+The received query has passed all checks and is being processed by the resolver.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_NOTIFY_RECEIVED">
+<term>RESOLVER_NOTIFY_RECEIVED NOTIFY arrived but server is not authoritative</term>
+<listitem><para>
+The resolver received a NOTIFY message. As the server is not authoritative it
+cannot process it, so it returns an error message to the sender with the RCODE
+set to NOTAUTH.
</para></listitem>
</varlistentry>
-<varlistentry id="RESOLVER_NOTONEQUES">
-<term>RESOLVER_NOTONEQUES query contained %1 questions, exactly one question was expected</term>
+<varlistentry id="RESOLVER_NOT_ONE_QUESTION">
+<term>RESOLVER_NOT_ONE_QUESTION query contained %1 questions, exactly one question was expected</term>
<listitem><para>
A debug message, the resolver received a query that contained the number of
entries in the question section detailed in the message. This is a malformed
@@ -1860,17 +2564,16 @@ return a message to the sender with the RCODE set to FORMERR.
</para></listitem>
</varlistentry>
-<varlistentry id="RESOLVER_OPCODEUNS">
-<term>RESOLVER_OPCODEUNS opcode %1 not supported by the resolver</term>
+<varlistentry id="RESOLVER_NO_ROOT_ADDRESS">
+<term>RESOLVER_NO_ROOT_ADDRESS no root addresses available</term>
<listitem><para>
-A debug message, the resolver received a message with an unsupported opcode
-(it can only process QUERY opcodes). It will return a message to the sender
-with the RCODE set to NOTIMP.
+A warning message during startup indicating that no root addresses have been
+set. This may be because the resolver will get them from a priming query.
</para></listitem>
</varlistentry>
-<varlistentry id="RESOLVER_PARSEERR">
-<term>RESOLVER_PARSEERR error parsing received message: %1 - returning %2</term>
+<varlistentry id="RESOLVER_PARSE_ERROR">
+<term>RESOLVER_PARSE_ERROR error parsing received message: %1 - returning %2</term>
<listitem><para>
A debug message noting that the resolver received a message and the parsing
of the body of the message failed due to some non-protocol related reason
@@ -1879,16 +2582,16 @@ a textual description of the problem and the RCODE returned.
</para></listitem>
</varlistentry>
-<varlistentry id="RESOLVER_PRINTMSG">
-<term>RESOLVER_PRINTMSG print message command, aeguments are: %1</term>
+<varlistentry id="RESOLVER_PRINT_COMMAND">
+<term>RESOLVER_PRINT_COMMAND print message command, arguments are: %1</term>
<listitem><para>
This message is logged when a "print_message" command is received over the
command channel.
</para></listitem>
</varlistentry>
-<varlistentry id="RESOLVER_PROTERR">
-<term>RESOLVER_PROTERR protocol error parsing received message: %1 - returning %2</term>
+<varlistentry id="RESOLVER_PROTOCOL_ERROR">
+<term>RESOLVER_PROTOCOL_ERROR protocol error parsing received message: %1 - returning %2</term>
<listitem><para>
A debug message noting that the resolver received a message and the parsing
of the body of the message failed due to some protocol error (although the
@@ -1897,73 +2600,90 @@ description of the problem and the RCODE returned.
</para></listitem>
</varlistentry>
-<varlistentry id="RESOLVER_QUSETUP">
-<term>RESOLVER_QUSETUP query setup</term>
+<varlistentry id="RESOLVER_QUERY_ACCEPTED">
+<term>RESOLVER_QUERY_ACCEPTED query accepted: '%1/%2/%3' from %4</term>
<listitem><para>
-A debug message noting that the resolver is creating a RecursiveQuery object.
+A debug message that indicates an incoming query is accepted according to
+the query ACL. The log message shows the query in the form of
+<query name>/<query type>/<query class>, and the client that sent the
+query in the form of <source IP address>#<source port>.
</para></listitem>
</varlistentry>
-<varlistentry id="RESOLVER_QUSHUT">
-<term>RESOLVER_QUSHUT query shutdown</term>
+<varlistentry id="RESOLVER_QUERY_DROPPED">
+<term>RESOLVER_QUERY_DROPPED query dropped: '%1/%2/%3' from %4</term>
<listitem><para>
-A debug message noting that the resolver is destroying a RecursiveQuery object.
+An informational message that indicates an incoming query is dropped
+according to the query ACL. Unlike the RESOLVER_QUERY_REJECTED
+case, the server does not return any response. The log message
+shows the query in the form of <query name>/<query type>/<query
+class>, and the client that sent the query in the form of <source
+IP address>#<source port>.
</para></listitem>
</varlistentry>
-<varlistentry id="RESOLVER_QUTMOSMALL">
-<term>RESOLVER_QUTMOSMALL query timeout of %1 is too small</term>
+<varlistentry id="RESOLVER_QUERY_REJECTED">
+<term>RESOLVER_QUERY_REJECTED query rejected: '%1/%2/%3' from %4</term>
<listitem><para>
-An error indicating that the configuration value specified for the query
-timeout is too small.
+An informational message that indicates an incoming query is rejected
+according to the query ACL. This results in a response with an RCODE of
+REFUSED. The log message shows the query in the form of <query
+name>/<query type>/<query class>, and the client that sent the
+query in the form of <source IP address>#<source port>.
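+</para><para>
+For illustration only, and assuming the usual action/from form of BIND10
+ACL entries (the prefixes below are examples, not defaults), a query ACL
+that accepts local clients and rejects everyone else might look like:
+</para><screen>
+[ {"action": "ACCEPT", "from": "127.0.0.1"},
+  {"action": "ACCEPT", "from": "192.0.2.0/24"},
+  {"action": "REJECT", "from": "0.0.0.0/0"} ]
+</screen><para>
+Queries matching a REJECT entry produce this message; a DROP entry would
+produce RESOLVER_QUERY_DROPPED instead.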
</para></listitem>
</varlistentry>
-<varlistentry id="RESOLVER_RECURSIVE">
-<term>RESOLVER_RECURSIVE running in recursive mode</term>
+<varlistentry id="RESOLVER_QUERY_SETUP">
+<term>RESOLVER_QUERY_SETUP query setup</term>
<listitem><para>
-This is an informational message that appears at startup noting that the
-resolver is running in recursive mode.
+A debug message noting that the resolver is creating a RecursiveQuery object.
</para></listitem>
</varlistentry>
-<varlistentry id="RESOLVER_RECVMSG">
-<term>RESOLVER_RECVMSG resolver has received a DNS message</term>
+<varlistentry id="RESOLVER_QUERY_SHUTDOWN">
+<term>RESOLVER_QUERY_SHUTDOWN query shutdown</term>
<listitem><para>
-A debug message indicating that the resolver has received a message. Depending
-on the debug settings, subsequent log output will indicate the nature of the
-message.
+A debug message noting that the resolver is destroying a RecursiveQuery object.
</para></listitem>
</varlistentry>
-<varlistentry id="RESOLVER_RETRYNEG">
-<term>RESOLVER_RETRYNEG negative number of retries (%1) specified in the configuration</term>
+<varlistentry id="RESOLVER_QUERY_TIME_SMALL">
+<term>RESOLVER_QUERY_TIME_SMALL query timeout of %1 is too small</term>
<listitem><para>
-An error message indicating that the resolver configuration has specified a
-negative retry count. Only zero or positive values are valid.
+An error indicating that the configuration value specified for the query
+timeout is too small.
</para></listitem>
</varlistentry>
-<varlistentry id="RESOLVER_ROOTADDR">
-<term>RESOLVER_ROOTADDR setting root address %1(%2)</term>
+<varlistentry id="RESOLVER_RECEIVED_MESSAGE">
+<term>RESOLVER_RECEIVED_MESSAGE resolver has received a DNS message</term>
<listitem><para>
-This message may appear multiple times during startup; it lists the root
-addresses used by the resolver.
+A debug message indicating that the resolver has received a message. Depending
+on the debug settings, subsequent log output will indicate the nature of the
+message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_RECURSIVE">
+<term>RESOLVER_RECURSIVE running in recursive mode</term>
+<listitem><para>
+This is an informational message that appears at startup noting that the
+resolver is running in recursive mode.
</para></listitem>
</varlistentry>
-<varlistentry id="RESOLVER_SERVICE">
-<term>RESOLVER_SERVICE service object created</term>
+<varlistentry id="RESOLVER_SERVICE_CREATED">
+<term>RESOLVER_SERVICE_CREATED service object created</term>
<listitem><para>
A debug message, output when the main service object (which handles the
received queries) is created.
</para></listitem>
</varlistentry>
-<varlistentry id="RESOLVER_SETPARAM">
-<term>RESOLVER_SETPARAM query timeout: %1, client timeout: %2, lookup timeout: %3, retry count: %4</term>
+<varlistentry id="RESOLVER_SET_PARAMS">
+<term>RESOLVER_SET_PARAMS query timeout: %1, client timeout: %2, lookup timeout: %3, retry count: %4</term>
<listitem><para>
-A debug message, lists the parameters associated with the message. These are:
+A debug message, lists the parameters being set for the resolver. These are:
query timeout: the timeout (in ms) used for queries originated by the resolver
to upstream servers. Client timeout: the interval allowed to resolve a query for
a client: after this time, the resolver sends back a SERVFAIL to the client
@@ -1972,17 +2692,33 @@ resolver gives up trying to resolve a query. Retry count: the number of times
the resolver will retry a query to an upstream server if it gets a timeout.
</para><para>
The client and lookup timeouts require a bit more explanation. The
-resolution of the clent query might require a large number of queries to
+resolution of the client query might require a large number of queries to
upstream nameservers. Even if none of these queries times out, the total time
taken to perform all the queries may exceed the client timeout. When this
happens, a SERVFAIL is returned to the client, but the resolver continues
with the resolution process. Data received is added to the cache. However,
-there comes a time - the lookup timeout - when even the resolve gives up.
+there comes a time - the lookup timeout - when even the resolver gives up.
At this point it will wait for pending upstream queries to complete or
time out and drop the query.
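+</para><para>
+As a purely illustrative example (these values are neither defaults nor
+recommendations), a resolver might be configured with:
+</para><screen>
+query timeout:   2000 ms   (per upstream query)
+client timeout:  4000 ms   (SERVFAIL sent to the client after this)
+lookup timeout: 30000 ms   (resolution abandoned entirely after this)
+retry count:    3
+</screen><para>
+which this message would report as "query timeout: 2000, client timeout:
+4000, lookup timeout: 30000, retry count: 3".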
</para></listitem>
</varlistentry>
+<varlistentry id="RESOLVER_SET_QUERY_ACL">
+<term>RESOLVER_SET_QUERY_ACL query ACL is configured</term>
+<listitem><para>
+A debug message that appears when a new query ACL is configured for the
+resolver.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_SET_ROOT_ADDRESS">
+<term>RESOLVER_SET_ROOT_ADDRESS setting root address %1(%2)</term>
+<listitem><para>
+This message may appear multiple times during startup; it lists the root
+addresses used by the resolver.
+</para></listitem>
+</varlistentry>
+
<varlistentry id="RESOLVER_SHUTDOWN">
<term>RESOLVER_SHUTDOWN resolver shutdown complete</term>
<listitem><para>
@@ -2005,13 +2741,386 @@ An informational message, this is output when the resolver starts up.
</para></listitem>
</varlistentry>
-<varlistentry id="RESOLVER_UNEXRESP">
-<term>RESOLVER_UNEXRESP received unexpected response, ignoring</term>
+<varlistentry id="RESOLVER_UNEXPECTED_RESPONSE">
+<term>RESOLVER_UNEXPECTED_RESPONSE received unexpected response, ignoring</term>
<listitem><para>
A debug message noting that the server has received a response instead of a
query and is ignoring it.
</para></listitem>
</varlistentry>
+
+<varlistentry id="RESOLVER_UNSUPPORTED_OPCODE">
+<term>RESOLVER_UNSUPPORTED_OPCODE opcode %1 not supported by the resolver</term>
+<listitem><para>
+A debug message, the resolver received a message with an unsupported opcode
+(it can only process QUERY opcodes). It will return a message to the sender
+with the RCODE set to NOTIMP.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFRIN_AXFR_DATABASE_FAILURE">
+<term>XFRIN_AXFR_DATABASE_FAILURE AXFR transfer of zone %1 failed: %2</term>
+<listitem><para>
+The AXFR transfer for the given zone has failed due to a database problem.
+The error is shown in the log message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFRIN_AXFR_INTERNAL_FAILURE">
+<term>XFRIN_AXFR_INTERNAL_FAILURE AXFR transfer of zone %1 failed: %2</term>
+<listitem><para>
+The AXFR transfer for the given zone has failed due to an internal
+problem in the BIND10 python wrapper library.
+The error is shown in the log message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFRIN_AXFR_TRANSFER_FAILURE">
+<term>XFRIN_AXFR_TRANSFER_FAILURE AXFR transfer of zone %1 failed: %2</term>
+<listitem><para>
+The AXFR transfer for the given zone has failed due to a protocol error.
+The error is shown in the log message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFRIN_AXFR_TRANSFER_STARTED">
+<term>XFRIN_AXFR_TRANSFER_STARTED AXFR transfer of zone %1 started</term>
+<listitem><para>
+A connection to the master server has been made, the serial value in
+the SOA record has been checked, and a zone transfer has been started.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFRIN_AXFR_TRANSFER_SUCCESS">
+<term>XFRIN_AXFR_TRANSFER_SUCCESS AXFR transfer of zone %1 succeeded</term>
+<listitem><para>
+The AXFR transfer of the given zone was successfully completed.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFRIN_BAD_MASTER_ADDR_FORMAT">
+<term>XFRIN_BAD_MASTER_ADDR_FORMAT bad format for master address: %1</term>
+<listitem><para>
+The given master address is not a valid IP address.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFRIN_BAD_MASTER_PORT_FORMAT">
+<term>XFRIN_BAD_MASTER_PORT_FORMAT bad format for master port: %1</term>
+<listitem><para>
+The master port as read from the configuration is not a valid port number.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFRIN_BAD_TSIG_KEY_STRING">
+<term>XFRIN_BAD_TSIG_KEY_STRING bad TSIG key string: %1</term>
+<listitem><para>
+The TSIG key string as read from the configuration does not represent
+a valid TSIG key.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFRIN_BAD_ZONE_CLASS">
+<term>XFRIN_BAD_ZONE_CLASS Invalid zone class: %1</term>
+<listitem><para>
+The zone class as read from the configuration is not a valid DNS class.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFRIN_CC_SESSION_ERROR">
+<term>XFRIN_CC_SESSION_ERROR error reading from cc channel: %1</term>
+<listitem><para>
+There was a problem reading from the command and control channel. The
+most likely cause is that the msgq daemon is not running.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFRIN_COMMAND_ERROR">
+<term>XFRIN_COMMAND_ERROR error while executing command '%1': %2</term>
+<listitem><para>
+There was an error while the given command was being processed. The
+error is given in the log message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFRIN_CONNECT_MASTER">
+<term>XFRIN_CONNECT_MASTER error connecting to master at %1: %2</term>
+<listitem><para>
+There was an error opening a connection to the master. The error is
+shown in the log message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFRIN_IMPORT_DNS">
+<term>XFRIN_IMPORT_DNS error importing python DNS module: %1</term>
+<listitem><para>
+There was an error importing the python DNS module pydnspp. The most
+likely cause is a PYTHONPATH problem.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFRIN_MSGQ_SEND_ERROR">
+<term>XFRIN_MSGQ_SEND_ERROR error while contacting %1 and %2</term>
+<listitem><para>
+There was a problem sending a message to the xfrout module or the
+zone manager. This most likely means that the msgq daemon has quit or
+was killed.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFRIN_MSGQ_SEND_ERROR_ZONE_MANAGER">
+<term>XFRIN_MSGQ_SEND_ERROR_ZONE_MANAGER error while contacting %1</term>
+<listitem><para>
+There was a problem sending a message to the zone manager. This most
+likely means that the msgq daemon has quit or was killed.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFRIN_RETRANSFER_UNKNOWN_ZONE">
+<term>XFRIN_RETRANSFER_UNKNOWN_ZONE got notification to retransfer unknown zone %1</term>
+<listitem><para>
+There was an internal command to retransfer the given zone, but the
+zone is not known to the system. This may indicate that the configuration
+for xfrin is incomplete, or there was a typographical error in the
+zone name in the configuration.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFRIN_STARTING">
+<term>XFRIN_STARTING starting xfrin with command line '%1'</term>
+<listitem><para>
+An informational message; this is output when the xfrin daemon starts up.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFRIN_STOPPED_BY_KEYBOARD">
+<term>XFRIN_STOPPED_BY_KEYBOARD keyboard interrupt, shutting down</term>
+<listitem><para>
+There was a keyboard interrupt signal to stop the xfrin daemon. The
+daemon will now shut down.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFRIN_UNKNOWN_ERROR">
+<term>XFRIN_UNKNOWN_ERROR unknown error: %1</term>
+<listitem><para>
+An uncaught exception was raised while running the xfrin daemon. The
+exception message is printed in the log message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_AXFR_TRANSFER_DONE">
+<term>XFROUT_AXFR_TRANSFER_DONE transfer of %1/%2 complete</term>
+<listitem><para>
+The transfer of the given zone has been completed successfully, or was
+aborted due to a shutdown event.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_AXFR_TRANSFER_ERROR">
+<term>XFROUT_AXFR_TRANSFER_ERROR error transferring zone %1/%2: %3</term>
+<listitem><para>
+An uncaught exception was encountered while sending the response to
+an AXFR query. The error message of the exception is included in the
+log message, but this error most likely points to incomplete exception
+handling in the code.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_AXFR_TRANSFER_FAILED">
+<term>XFROUT_AXFR_TRANSFER_FAILED transfer of %1/%2 failed, rcode: %3</term>
+<listitem><para>
+A transfer out for the given zone failed. An error response is sent
+to the client. The given rcode is the rcode that is set in the error
+response. This is either NOTAUTH (we are not authoritative for the
+zone), SERVFAIL (our internal database is missing the SOA record for
+the zone), or REFUSED (the limit of simultaneous outgoing AXFR
+transfers, as specified by the configuration value
+Xfrout/max_transfers_out, has been reached).
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_AXFR_TRANSFER_STARTED">
+<term>XFROUT_AXFR_TRANSFER_STARTED transfer of zone %1/%2 has started</term>
+<listitem><para>
+A transfer out of the given zone has started.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_BAD_TSIG_KEY_STRING">
+<term>XFROUT_BAD_TSIG_KEY_STRING bad TSIG key string: %1</term>
+<listitem><para>
+The TSIG key string as read from the configuration does not represent
+a valid TSIG key.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_CC_SESSION_ERROR">
+<term>XFROUT_CC_SESSION_ERROR error reading from cc channel: %1</term>
+<listitem><para>
+There was a problem reading from the command and control channel. The
+most likely cause is that the msgq daemon is not running.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_CC_SESSION_TIMEOUT_ERROR">
+<term>XFROUT_CC_SESSION_TIMEOUT_ERROR timeout waiting for cc response</term>
+<listitem><para>
+There was a problem reading a response from another module over the
+command and control channel. The most likely cause is that the
+configuration manager b10-cfgmgr is not running.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_FETCH_REQUEST_ERROR">
+<term>XFROUT_FETCH_REQUEST_ERROR socket error while fetching a request from the auth daemon</term>
+<listitem><para>
+There was a socket error while contacting the b10-auth daemon to
+fetch a transfer request. The auth daemon may have shut down.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_HANDLE_QUERY_ERROR">
+<term>XFROUT_HANDLE_QUERY_ERROR error while handling query: %1</term>
+<listitem><para>
+There was a general error handling an xfrout query. The error is shown
+in the message. In principle this error should not appear, and points
+to an oversight in catching exceptions in the right place. However, to
+ensure the daemon keeps running, this error is caught and reported.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_IMPORT">
+<term>XFROUT_IMPORT error importing python module: %1</term>
+<listitem><para>
+There was an error importing a python module. One of the modules needed
+by xfrout could not be found. This suggests that either some libraries
+are missing on the system, or the PYTHONPATH variable is not correct.
+The specific place where this library needs to be depends on your
+system and your specific installation.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_NEW_CONFIG">
+<term>XFROUT_NEW_CONFIG Update xfrout configuration</term>
+<listitem><para>
+New configuration settings have been sent from the configuration
+manager. The xfrout daemon will now apply them.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_NEW_CONFIG_DONE">
+<term>XFROUT_NEW_CONFIG_DONE Update xfrout configuration done</term>
+<listitem><para>
+The xfrout daemon is now done reading the new configuration settings
+received from the configuration manager.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_NOTIFY_COMMAND">
+<term>XFROUT_NOTIFY_COMMAND received command to send notifies for %1/%2</term>
+<listitem><para>
+The xfrout daemon received a command on the command channel that
+NOTIFY packets should be sent for the given zone.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_PARSE_QUERY_ERROR">
+<term>XFROUT_PARSE_QUERY_ERROR error parsing query: %1</term>
+<listitem><para>
+There was a parse error while reading an incoming query. The parse
+error is shown in the log message. A remote client sent a packet we
+do not understand or support. The xfrout request will be ignored.
+In general, this should only occur for unexpected problems like
+memory allocation failures, as the query should already have been
+parsed by the b10-auth daemon, before it was passed here.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_PROCESS_REQUEST_ERROR">
+<term>XFROUT_PROCESS_REQUEST_ERROR error processing transfer request: %2</term>
+<listitem><para>
+There was an error processing a transfer request. The error is included
+in the log message, but at this point no specific information other
+than that could be given. This points to incomplete exception handling
+in the code.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_RECEIVED_SHUTDOWN_COMMAND">
+<term>XFROUT_RECEIVED_SHUTDOWN_COMMAND shutdown command received</term>
+<listitem><para>
+The xfrout daemon received a shutdown command from the command channel
+and will now shut down.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_RECEIVE_FILE_DESCRIPTOR_ERROR">
+<term>XFROUT_RECEIVE_FILE_DESCRIPTOR_ERROR error receiving the file descriptor for an XFR connection</term>
+<listitem><para>
+There was an error receiving the file descriptor for the transfer
+request. Normally, the request is received by b10-auth, and passed on
+to the xfrout daemon, so it can answer directly. However, there was a
+problem receiving this file descriptor. The request will be ignored.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_REMOVE_OLD_UNIX_SOCKET_FILE_ERROR">
+<term>XFROUT_REMOVE_OLD_UNIX_SOCKET_FILE_ERROR error removing unix socket file %1: %2</term>
+<listitem><para>
+The unix socket file that xfrout needs to contact the auth daemon
+already exists and needs to be removed first, but there is a problem
+removing it. It is likely that we do not have permission to remove
+this file. The specific error is shown in the log message. The xfrout
+daemon will shut down.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_REMOVE_UNIX_SOCKET_FILE_ERROR">
+<term>XFROUT_REMOVE_UNIX_SOCKET_FILE_ERROR error clearing unix socket file %1: %2</term>
+<listitem><para>
+When shutting down, the xfrout daemon tried to clear the unix socket
+file used for communication with the auth daemon. It failed to remove
+the file. The reason for the failure is given in the error message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_SOCKET_SELECT_ERROR">
+<term>XFROUT_SOCKET_SELECT_ERROR error while calling select() on request socket: %1</term>
+<listitem><para>
+There was an error while calling select() on the socket that informs
+the xfrout daemon that a new xfrout request has arrived. This should
+be the result of a rare local error such as a memory allocation failure and
+shouldn't happen under normal conditions. The error is included in the
+log message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_STOPPED_BY_KEYBOARD">
+<term>XFROUT_STOPPED_BY_KEYBOARD keyboard interrupt, shutting down</term>
+<listitem><para>
+There was a keyboard interrupt signal to stop the xfrout daemon. The
+daemon will now shut down.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_STOPPING">
+<term>XFROUT_STOPPING the xfrout daemon is shutting down</term>
+<listitem><para>
+The current transfer is aborted, as the xfrout daemon is shutting down.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_UNIX_SOCKET_FILE_IN_USE">
+<term>XFROUT_UNIX_SOCKET_FILE_IN_USE another xfrout process seems to be using the unix socket file %1</term>
+<listitem><para>
+While starting up, the xfrout daemon tried to clear the unix domain
+socket needed for contacting the b10-auth daemon to pass requests
+on, but the file is in use. The most likely cause is that another
+xfrout daemon process is still running. This xfrout daemon (the one
+printing this message) will not start.
+</para></listitem>
+</varlistentry>
</variablelist>
</para>
</chapter>
diff --git a/src/bin/auth/auth.spec.pre.in b/src/bin/auth/auth.spec.pre.in
index d88ffb5..2ce044e 100644
--- a/src/bin/auth/auth.spec.pre.in
+++ b/src/bin/auth/auth.spec.pre.in
@@ -122,6 +122,24 @@
}
]
}
+ ],
+ "statistics": [
+ {
+ "item_name": "queries.tcp",
+ "item_type": "integer",
+ "item_optional": false,
+ "item_default": 0,
+          "item_title": "Queries TCP",
+          "item_description": "The total count of queries received over TCP by all auth server instances since they first started"
+ },
+ {
+ "item_name": "queries.udp",
+ "item_type": "integer",
+ "item_optional": false,
+ "item_default": 0,
+ "item_title": "Queries UDP",
+          "item_description": "The total count of queries received over UDP by all auth server instances since they first started"
+ }
]
}
}
diff --git a/src/bin/auth/b10-auth.8 b/src/bin/auth/b10-auth.8
index 0356683..aedadee 100644
--- a/src/bin/auth/b10-auth.8
+++ b/src/bin/auth/b10-auth.8
@@ -2,12 +2,12 @@
.\" Title: b10-auth
.\" Author: [FIXME: author] [see http://docbook.sf.net/el/author]
.\" Generator: DocBook XSL Stylesheets v1.75.2 <http://docbook.sf.net/>
-.\" Date: March 8, 2011
+.\" Date: August 11, 2011
.\" Manual: BIND10
.\" Source: BIND10
.\" Language: English
.\"
-.TH "B10\-AUTH" "8" "March 8, 2011" "BIND10" "BIND10"
+.TH "B10\-AUTH" "8" "August 11, 2011" "BIND10" "BIND10"
.\" -----------------------------------------------------------------
.\" * set default formatting
.\" -----------------------------------------------------------------
@@ -70,18 +70,6 @@ defines the path to the SQLite3 zone file when using the sqlite datasource\&. Th
/usr/local/var/bind10\-devel/zone\&.sqlite3\&.
.PP
-\fIlisten_on\fR
-is a list of addresses and ports for
-\fBb10\-auth\fR
-to listen on\&. The list items are the
-\fIaddress\fR
-string and
-\fIport\fR
-number\&. By default,
-\fBb10\-auth\fR
-listens on port 53 on the IPv6 (::) and IPv4 (0\&.0\&.0\&.0) wildcard addresses\&.
-.PP
-
\fIdatasources\fR
configures data sources\&. The list items include:
\fItype\fR
@@ -114,6 +102,18 @@ In this development version, currently this is only used for the memory data sou
.RE
.PP
+\fIlisten_on\fR
+is a list of addresses and ports for
+\fBb10\-auth\fR
+to listen on\&. The list items are the
+\fIaddress\fR
+string and
+\fIport\fR
+number\&. By default,
+\fBb10\-auth\fR
+listens on port 53 on the IPv6 (::) and IPv4 (0\&.0\&.0\&.0) wildcard addresses\&.
+.PP
+
\fIstatistics\-interval\fR
is the timer interval in seconds for
\fBb10\-auth\fR
@@ -164,6 +164,25 @@ immediately\&.
\fBshutdown\fR
exits
\fBb10\-auth\fR\&. (Note that the BIND 10 boss process will restart this service\&.)
+.SH "STATISTICS DATA"
+.PP
+The statistics data collected by the
+\fBb10\-stats\fR
+daemon include:
+.PP
+auth\&.queries\&.tcp
+.RS 4
+Total count of queries received by the
+\fBb10\-auth\fR
+server over TCP since startup\&.
+.RE
+.PP
+auth\&.queries\&.udp
+.RS 4
+Total count of queries received by the
+\fBb10\-auth\fR
+server over UDP since startup\&.
+.RE
.SH "FILES"
.PP
diff --git a/src/bin/auth/b10-auth.xml b/src/bin/auth/b10-auth.xml
index 2b53394..636f437 100644
--- a/src/bin/auth/b10-auth.xml
+++ b/src/bin/auth/b10-auth.xml
@@ -20,7 +20,7 @@
<refentry>
<refentryinfo>
- <date>March 8, 2011</date>
+ <date>August 11, 2011</date>
</refentryinfo>
<refmeta>
@@ -132,15 +132,6 @@
</para>
<para>
- <varname>listen_on</varname> is a list of addresses and ports for
- <command>b10-auth</command> to listen on.
- The list items are the <varname>address</varname> string
- and <varname>port</varname> number.
- By default, <command>b10-auth</command> listens on port 53
- on the IPv6 (::) and IPv4 (0.0.0.0) wildcard addresses.
- </para>
-
- <para>
<varname>datasources</varname> configures data sources.
The list items include:
<varname>type</varname> to optionally choose the data source type
@@ -165,6 +156,15 @@
</para>
<para>
+ <varname>listen_on</varname> is a list of addresses and ports for
+ <command>b10-auth</command> to listen on.
+ The list items are the <varname>address</varname> string
+ and <varname>port</varname> number.
+ By default, <command>b10-auth</command> listens on port 53
+ on the IPv6 (::) and IPv4 (0.0.0.0) wildcard addresses.
+ </para>
+
+ <para>
<varname>statistics-interval</varname> is the timer interval
in seconds for <command>b10-auth</command> to share its
statistics information to
@@ -209,6 +209,34 @@
</refsect1>
<refsect1>
+ <title>STATISTICS DATA</title>
+
+ <para>
+ The statistics data collected by the <command>b10-stats</command>
+ daemon include:
+ </para>
+
+ <variablelist>
+
+ <varlistentry>
+ <term>auth.queries.tcp</term>
+ <listitem><simpara>Total count of queries received by the
+ <command>b10-auth</command> server over TCP since startup.
+ </simpara></listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term>auth.queries.udp</term>
+ <listitem><simpara>Total count of queries received by the
+ <command>b10-auth</command> server over UDP since startup.
+ </simpara></listitem>
+ </varlistentry>
+
+ </variablelist>
+
+ </refsect1>
+
+ <refsect1>
<title>FILES</title>
<para>
<filename>/usr/local/var/bind10-devel/zone.sqlite3</filename>
diff --git a/src/bin/auth/query.cc b/src/bin/auth/query.cc
index 05bcd89..3fe03c8 100644
--- a/src/bin/auth/query.cc
+++ b/src/bin/auth/query.cc
@@ -31,7 +31,7 @@ namespace isc {
namespace auth {
void
-Query::getAdditional(const ZoneFinder& zone, const RRset& rrset) const {
+Query::getAdditional(ZoneFinder& zone, const RRset& rrset) const {
RdataIteratorPtr rdata_iterator(rrset.getRdataIterator());
for (; !rdata_iterator->isLast(); rdata_iterator->next()) {
const Rdata& rdata(rdata_iterator->getCurrent());
@@ -47,7 +47,7 @@ Query::getAdditional(const ZoneFinder& zone, const RRset& rrset) const {
}
void
-Query::findAddrs(const ZoneFinder& zone, const Name& qname,
+Query::findAddrs(ZoneFinder& zone, const Name& qname,
const ZoneFinder::FindOptions options) const
{
// Out of zone name
@@ -86,7 +86,7 @@ Query::findAddrs(const ZoneFinder& zone, const Name& qname,
}
void
-Query::putSOA(const ZoneFinder& zone) const {
+Query::putSOA(ZoneFinder& zone) const {
ZoneFinder::FindResult soa_result(zone.find(zone.getOrigin(),
RRType::SOA()));
if (soa_result.code != ZoneFinder::SUCCESS) {
@@ -104,7 +104,7 @@ Query::putSOA(const ZoneFinder& zone) const {
}
void
-Query::getAuthAdditional(const ZoneFinder& zone) const {
+Query::getAuthAdditional(ZoneFinder& zone) const {
// Fill in authority and addtional sections.
ZoneFinder::FindResult ns_result = zone.find(zone.getOrigin(),
RRType::NS());
diff --git a/src/bin/auth/query.h b/src/bin/auth/query.h
index fa023fe..13523e8 100644
--- a/src/bin/auth/query.h
+++ b/src/bin/auth/query.h
@@ -69,7 +69,7 @@ private:
/// Adds a SOA of the zone into the authority zone of response_.
/// Can throw NoSOA.
///
- void putSOA(const isc::datasrc::ZoneFinder& zone) const;
+ void putSOA(isc::datasrc::ZoneFinder& zone) const;
/// \brief Look up additional data (i.e., address records for the names
/// included in NS or MX records).
@@ -85,7 +85,7 @@ private:
/// query is to be found.
/// \param rrset The RRset (i.e., NS or MX rrset) which require additional
/// processing.
- void getAdditional(const isc::datasrc::ZoneFinder& zone,
+ void getAdditional(isc::datasrc::ZoneFinder& zone,
const isc::dns::RRset& rrset) const;
/// \brief Find address records for a specified name.
@@ -104,7 +104,7 @@ private:
/// be found.
/// \param qname The name in rrset RDATA.
/// \param options The search options.
- void findAddrs(const isc::datasrc::ZoneFinder& zone,
+ void findAddrs(isc::datasrc::ZoneFinder& zone,
const isc::dns::Name& qname,
const isc::datasrc::ZoneFinder::FindOptions options
= isc::datasrc::ZoneFinder::FIND_DEFAULT) const;
@@ -127,7 +127,7 @@ private:
///
/// \param zone The \c ZoneFinder through which the NS and additional data
/// for the query are to be found.
- void getAuthAdditional(const isc::datasrc::ZoneFinder& zone) const;
+ void getAuthAdditional(isc::datasrc::ZoneFinder& zone) const;
public:
/// Constructor from query parameters.
diff --git a/src/bin/auth/tests/query_unittest.cc b/src/bin/auth/tests/query_unittest.cc
index 6a75856..68f0a1d 100644
--- a/src/bin/auth/tests/query_unittest.cc
+++ b/src/bin/auth/tests/query_unittest.cc
@@ -122,12 +122,12 @@ public:
masterLoad(zone_stream, origin_, rrclass_,
boost::bind(&MockZoneFinder::loadRRset, this, _1));
}
- virtual const isc::dns::Name& getOrigin() const { return (origin_); }
- virtual const isc::dns::RRClass& getClass() const { return (rrclass_); }
+ virtual isc::dns::Name getOrigin() const { return (origin_); }
+ virtual isc::dns::RRClass getClass() const { return (rrclass_); }
virtual FindResult find(const isc::dns::Name& name,
const isc::dns::RRType& type,
RRsetList* target = NULL,
- const FindOptions options = FIND_DEFAULT) const;
+ const FindOptions options = FIND_DEFAULT);
// If false is passed, it makes the zone broken as if it didn't have the
// SOA.
@@ -165,7 +165,7 @@ private:
ZoneFinder::FindResult
MockZoneFinder::find(const Name& name, const RRType& type,
- RRsetList* target, const FindOptions options) const
+ RRsetList* target, const FindOptions options)
{
// Emulating a broken zone: mandatory apex RRs are missing if specifically
// configured so (which are rare cases).
diff --git a/src/bin/bind10/bind10.xml b/src/bin/bind10/bind10.xml
index 1128264..b101ba8 100644
--- a/src/bin/bind10/bind10.xml
+++ b/src/bin/bind10/bind10.xml
@@ -2,7 +2,7 @@
"http://www.oasis-open.org/docbook/xml/4.2/docbookx.dtd"
[<!ENTITY mdash "—">]>
<!--
- - Copyright (C) 2010 Internet Systems Consortium, Inc. ("ISC")
+ - Copyright (C) 2010-2011 Internet Systems Consortium, Inc. ("ISC")
-
- Permission to use, copy, modify, and/or distribute this software for any
- purpose with or without fee is hereby granted, provided that the above
@@ -20,7 +20,7 @@
<refentry>
<refentryinfo>
- <date>March 31, 2011</date>
+ <date>August 11, 2011</date>
</refentryinfo>
<refmeta>
@@ -217,6 +217,30 @@ The default is the basename of ARG 0.
<!--
TODO: configuration section
-->
+
+ <refsect1>
+ <title>STATISTICS DATA</title>
+
+ <para>
+ The statistics data collected by the <command>b10-stats</command>
+ daemon include:
+ </para>
+
+ <variablelist>
+
+ <varlistentry>
+ <term>bind10.boot_time</term>
+ <listitem><para>
+ The date and time that the <command>bind10</command>
+ process started.
+ This is represented in ISO 8601 format.
+ </para></listitem>
+ </varlistentry>
+
+ </variablelist>
+
+ </refsect1>
+
<!--
<refsect1>
<title>FILES</title>
diff --git a/src/bin/bind10/bob.spec b/src/bin/bind10/bob.spec
index 1184fd1..b4cfac6 100644
--- a/src/bin/bind10/bob.spec
+++ b/src/bin/bind10/bob.spec
@@ -37,6 +37,17 @@
"command_description": "List the running BIND 10 processes",
"command_args": []
}
+ ],
+ "statistics": [
+ {
+ "item_name": "boot_time",
+ "item_type": "string",
+ "item_optional": false,
+ "item_default": "1970-01-01T00:00:00Z",
+ "item_title": "Boot time",
+ "item_description": "A date time when bind10 process starts initially",
+ "item_format": "date-time"
+ }
]
}
}
diff --git a/src/bin/bind10/creatorapi.txt b/src/bin/bind10/creatorapi.txt
new file mode 100644
index 0000000..c23d907
--- /dev/null
+++ b/src/bin/bind10/creatorapi.txt
@@ -0,0 +1,123 @@
+Socket creator API
+==================
+
+This API is between the Boss and other modules, allowing them to request sockets.
+For simplicity, we will use the socket creator for all (even non-privileged)
+ports for now, but we should have some function where we can abstract it later.
+
+Goals
+-----
+* Be able to request a socket of any combination of IPv4/IPv6 and UDP/TCP,
+  bound to a given port and address (sockets that are not bound to anything
+  can be created without privileges, and therefore are not requested from the
+  socket creator).
+* Allow providing the same socket to multiple modules (eg. multiple running
+ auth servers).
+* Allow releasing the sockets (in case all modules using a socket give it up,
+ terminate or crash).
+* Allow restricting the sharing (don't allow a socket to be shared between the
+  auth and recursive servers, as the packets would often get to the wrong
+  application; show an error instead).
+* Get the socket to the application.
+
+Transport of sockets
+--------------------
+It seems we are stuck with the current msgq for a while and there's a chance the
+new replacement will not be able to send sockets inbound. So, we need another
+channel.
+
+The boss will create a unix-domain socket and listen on it. When something
+requests a socket over the command channel and the socket is created, some kind
+of token is returned to the application (which will represent the future
+socket). The application then connects to the unix-domain socket, sends the
+token over the connection (so Boss will know which socket to send there, in case
+multiple applications ask for sockets simultaneously) and Boss sends the socket
+in return.
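+
+As an illustration only (the function name, the unix-domain socket path and the
+exact wire format of the token below are assumptions, not part of this design),
+the application side of the exchange could look roughly like this in Python:
+
+import array
+import socket
+
+def fetch_socket(creator_path, token):
+    # Connect to the unix-domain socket Boss listens on and present the
+    # token received earlier over the command channel.
+    conn = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+    conn.connect(creator_path)
+    conn.sendall(token)
+    # Boss answers with at least one byte of regular data and the file
+    # descriptor attached as SCM_RIGHTS ancillary data.
+    fds = array.array("i")
+    msg, ancdata, flags, addr = conn.recvmsg(1, socket.CMSG_LEN(fds.itemsize))
+    for level, ctype, data in ancdata:
+        if level == socket.SOL_SOCKET and ctype == socket.SCM_RIGHTS:
+            fds.frombytes(data[:fds.itemsize])
+    conn.close()
+    return fds[0]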
+
+In theory, we could send the requests directly over the unix-domain
+socket, but it has two disadvantages:
+* The msgq handles serializing/deserializing of structured
+ information (like the parameters to be used), we would have to do it
+ manually on the socket.
+* We could place some kind of security in front of msgq (in case file
+ permissions are not enough, for example if they are not honored on
+ socket files, as indicated in the first paragraph of:
+ http://lkml.indiana.edu/hypermail/linux/kernel/0505.2/0008.html).
+ The socket would have to be secured separately. With the tokens,
+ there's some level of security already - someone not having the
+  token can't request a privileged socket.
+
+Caching of sockets
+------------------
+To allow sending the same socket to multiple applications, the Boss process
+will hold a cache. Each socket that is created and sent out is also kept open
+in Boss. A reference count is kept with each of them (a short sketch follows
+at the end of this section).
+
+When another application asks for the same socket, it is simply sent from the
+cache instead of being created again by the socket creator.
+
+When an application gives the socket up willingly (by sending a message over
+the command channel), the reference count can be decreased without problems.
+But when the application terminates or crashes, we need to decrease it as well.
+There's a problem, since we don't know which command channel connection (eg.
+lname) belongs to which PID. Furthermore, the applications don't need to be
+started by boss.
+
+There are two possibilities:
+* Let the msgq send messages about disconnected clients (eg. group message to
+ some name). This one is better if we want to migrate to dbus, since dbus
+ already has this capability as well as sending the sockets inbound (at least it
+ seems so on unix) and we could get rid of the unix-domain socket completely.
+* Keep the unix-domain connections open forever. Boss can remember which socket
+ was sent to which connection and when the connection closes (because the
+ application crashed), it can drop all the references on the sockets. This
+ seems easier to implement.
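+
+A minimal sketch of the reference counting described above (the class, method
+and key names here are illustrative only, not part of this design):
+
+import os
+
+class SocketCache:
+    """Reference-counted cache of sockets Boss has handed out (sketch only)."""
+
+    def __init__(self):
+        # (address, port, protocol) -> [file descriptor, reference count]
+        self._sockets = {}
+
+    def get(self, key, create_func):
+        # Reuse a cached socket when one exists for this key, otherwise ask
+        # the socket creator (create_func) to make a new one.
+        if key in self._sockets:
+            self._sockets[key][1] += 1
+        else:
+            self._sockets[key] = [create_func(key), 1]
+        return self._sockets[key][0]
+
+    def release(self, key):
+        # Called when an application gives the socket up, terminates or
+        # crashes; close our own copy once nobody references it any more.
+        entry = self._sockets[key]
+        entry[1] -= 1
+        if entry[1] == 0:
+            os.close(entry[0])
+            del self._sockets[key]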
+
+The commands
+------------
+* Command to release a socket. This one would have a single parameter, the token
+ used to get the socket. After this, boss would decrease its reference count
+ and if it drops to zero, close its own copy of the socket. This should be used
+  when the module stops using the socket (and after it closes it). The
+ library could remember the file-descriptor to token mapping (for
+ common applications that don't request the same socket multiple
+ times in parallel).
+* Command to request a socket. It would have parameters to specify which socket
+ (IP address, address family, port) and how to allow sharing. Sharing would be
+ one of:
+ - None
+ - Same kind of application (however, it is not entirely clear what
+    this means; in case it doesn't work out intuitively, we'll need to
+ define it somehow)
+ - Any kind of application
+  The kind of application would also be provided, to decide if the sharing is
+  possible (eg. if auth allows sharing only with the same kind and something
+  else allows sharing with anything, sharing between them is not possible,
+  while two auth servers can share).
+
+  It would return either an error (the socket can't be created or sharing is not
+ possible) or the token. Then there would be some time for the application to
+ pick up the requested socket.
+
+Examples
+--------
+We probably would have a library with blocking calls to request the
+sockets, so the code could look like:
+
+(socket_fd, token) = request_socket(address, port, 'UDP', SHARE_SAMENAME, 'test-application')
+sock = socket.fromfd(socket_fd, socket.AF_INET, socket.SOCK_DGRAM)
+
+# Some sock.send and sock.recv stuff here
+
+sock.close()
+release_socket(socket_fd) # or release_socket(token)
+
+Known limitations
+-----------------
+Currently the socket creator doesn't support specifying any socket
+options. If it turns out there are any options that need to be set
+before bind(), we'll need to extend it (and extend the protocol as
+well). If we want to support them, we'll have to solve a possible
+conflict (what to do when two applications request the same socket and
+want to share it, but want different options).
+
+The current socket creator doesn't support raw sockets, but if they are
+needed, support should be easy to add.
diff --git a/src/bin/resolver/b10-resolver.xml b/src/bin/resolver/b10-resolver.xml
index bdf4f8a..efe045a 100644
--- a/src/bin/resolver/b10-resolver.xml
+++ b/src/bin/resolver/b10-resolver.xml
@@ -20,7 +20,7 @@
<refentry>
<refentryinfo>
- <date>February 17, 2011</date>
+ <date>August 16, 2011</date>
</refentryinfo>
<refmeta>
@@ -99,11 +99,14 @@
</listitem>
</varlistentry>
+<!-- TODO: this needs to be fixed as -v on command line
+should imply stdout or stderr output also -->
+<!-- TODO: can this -v be overidden by configuration or bindctl? -->
<varlistentry>
<term><option>-v</option></term>
<listitem><para>
- Enabled verbose mode. This enables diagnostic messages to
- STDERR.
+ Enable verbose mode.
+ This sets logging to the maximum debugging level.
</para></listitem>
</varlistentry>
@@ -147,6 +150,22 @@ once that is merged you can for instance do 'config add Resolver/forward_address
</para>
<para>
+<!-- TODO: need more explanation or point to guide. -->
+<!-- TODO: what about a netmask or cidr? -->
+<!-- TODO: document "key" -->
+<!-- TODO: where are the TSIG keys defined? -->
+<!-- TODO: key and from are mutually exclusive? what if both defined? -->
+ <varname>query_acl</varname> is a list of query access control
+ rules. The list items are the <varname>action</varname> string
+ and the <varname>from</varname> or <varname>key</varname> strings.
+ The possible actions are ACCEPT, REJECT and DROP.
+ The <varname>from</varname> is a remote (source) IPv4 or IPv6
+ address or special keyword.
+ The <varname>key</varname> is a TSIG key name.
+ The default configuration accepts queries from 127.0.0.1 and ::1.
+ </para>
+
+ <para>
<varname>retries</varname> is the number of times to retry
(resend query) after a query timeout
(<varname>timeout_query</varname>).
@@ -234,7 +253,8 @@ once that is merged you can for instance do 'config add Resolver/forward_address
The <command>b10-resolver</command> daemon was first coded in
September 2010. The initial implementation only provided
forwarding. Iteration was introduced in January 2011.
-<!-- TODO: document when caching was added -->
+ Caching was implemented in February 2011.
+ Access control was introduced in June 2011.
<!-- TODO: document when validation was added -->
</para>
</refsect1>
diff --git a/src/bin/stats/b10-stats.xml b/src/bin/stats/b10-stats.xml
index f0c472d..1164711 100644
--- a/src/bin/stats/b10-stats.xml
+++ b/src/bin/stats/b10-stats.xml
@@ -20,7 +20,7 @@
<refentry>
<refentryinfo>
- <date>Oct 15, 2010</date>
+ <date>August 11, 2011</date>
</refentryinfo>
<refmeta>
@@ -67,6 +67,7 @@
it. <command>b10-stats</command> invokes "sendstats" command
for <command>bind10</command> after its initial starting because it's
sure to collect statistics data from <command>bind10</command>.
+<!-- TODO: reword that last sentence? -->
</para>
</refsect1>
@@ -87,6 +88,123 @@
</refsect1>
<refsect1>
+ <title>CONFIGURATION AND COMMANDS</title>
+
+ <para>
+ The <command>b10-stats</command> command does not have any
+ configurable settings.
+ </para>
+
+<!-- TODO: formating -->
+ <para>
+ The configuration commands are:
+ </para>
+
+ <para>
+<!-- TODO: remove is removed in trac930 -->
+ <command>remove</command> removes the named statistics name and data.
+    <command>remove</command> removes the named statistics item and its data.
+
+ <para>
+<!-- TODO: reset is removed in trac930 -->
+ <command>reset</command> will reset all statistics data to
+ default values except for constant names.
+ This may re-add previously removed statistics names.
+ </para>
+
+ <para>
+ <command>set</command>
+<!-- TODO: document this -->
+ </para>
+
+ <para>
+ <command>show</command> will send the statistics data
+ in JSON format.
+ By default, it outputs all the statistics data it has collected.
+ An optional item name may be specified to receive individual output.
+ </para>
+
+<!-- TODO: document showschema -->
+
+ <para>
+    <command>shutdown</command> will shut down the
+ <command>b10-stats</command> process.
+ (Note that the <command>bind10</command> parent may restart it.)
+ </para>
+
+ <para>
+ <command>status</command> simply indicates that the daemon is
+ running.
+ </para>
+
+ </refsect1>
+
+ <refsect1>
+ <title>STATISTICS DATA</title>
+
+ <para>
+ The <command>b10-stats</command> daemon contains these statistics:
+ </para>
+
+ <variablelist>
+
+ <varlistentry>
+ <term>report_time</term>
+<!-- TODO: why not named stats.report_time? -->
+ <listitem><simpara>The latest report date and time in
+ ISO 8601 format.</simpara></listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term>stats.boot_time</term>
+ <listitem><simpara>The date and time when this daemon was
+ started in ISO 8601 format.
+ This is a constant which can't be reset except by restarting
+ <command>b10-stats</command>.
+ </simpara></listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term>stats.last_update_time</term>
+ <listitem><simpara>The date and time (in ISO 8601 format)
+ when this daemon last received data from another component.
+ </simpara></listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term>stats.lname</term>
+ <listitem><simpara>This is the name used for the
+ <command>b10-msgq</command> command-control channel.
+ (This is a constant which can't be reset except by restarting
+ <command>b10-stats</command>.)
+ </simpara></listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term>stats.start_time</term>
+ <listitem><simpara>This is the date and time (in ISO 8601 format)
+ when this daemon started collecting data.
+ </simpara></listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term>stats.timestamp</term>
+    <listitem><simpara>The current date and time represented in
+    seconds since the UNIX epoch (1970-01-01T00:00:00Z), with
+    precision (delimited with a period) up to
+    one hundred-thousandth of a second.</simpara></listitem>
+ </varlistentry>
+
+ </variablelist>
+
+ <para>
+    See the other manual pages for explanations of their statistics
+    that are tracked by <command>b10-stats</command>.
+ </para>
+
+ </refsect1>
+
+ <refsect1>
<title>FILES</title>
<para><filename>/usr/local/share/bind10-devel/stats.spec</filename>
<!--TODO: The filename should be computed from prefix-->
@@ -126,7 +244,7 @@
<title>HISTORY</title>
<para>
The <command>b10-stats</command> daemon was initially designed
- and implemented by Naoki Kambe of JPRS in Oct 2010.
+ and implemented by Naoki Kambe of JPRS in October 2010.
</para>
</refsect1>
</refentry><!--
diff --git a/src/bin/stats/stats-schema.spec b/src/bin/stats/stats-schema.spec
index 37e9c1a..5252865 100644
--- a/src/bin/stats/stats-schema.spec
+++ b/src/bin/stats/stats-schema.spec
@@ -54,8 +54,7 @@
"item_optional": false,
"item_default": 0.0,
"item_title": "stats.Timestamp",
- "item_description": "A current time stamp since epoch time (1970-01-01T00:00:00Z)",
- "item_format": "second"
+ "item_description": "A current time stamp since epoch time (1970-01-01T00:00:00Z)"
},
{
"item_name": "stats.lname",
diff --git a/src/bin/stats/stats.spec b/src/bin/stats/stats.spec
index 25f6b54..635eb48 100644
--- a/src/bin/stats/stats.spec
+++ b/src/bin/stats/stats.spec
@@ -56,6 +56,51 @@
"command_description": "Shut down the stats module",
"command_args": []
}
+ ],
+ "statistics": [
+ {
+ "item_name": "report_time",
+ "item_type": "string",
+ "item_optional": false,
+ "item_default": "1970-01-01T00:00:00Z",
+ "item_title": "Report time",
+ "item_description": "A date time when stats module reports",
+ "item_format": "date-time"
+ },
+ {
+ "item_name": "boot_time",
+ "item_type": "string",
+ "item_optional": false,
+ "item_default": "1970-01-01T00:00:00Z",
+ "item_title": "Boot time",
+ "item_description": "A date time when the stats module starts initially or when the stats module restarts",
+ "item_format": "date-time"
+ },
+ {
+ "item_name": "last_update_time",
+ "item_type": "string",
+ "item_optional": false,
+ "item_default": "1970-01-01T00:00:00Z",
+ "item_title": "Last update time",
+      "item_description": "The latest date and time when the stats module received data from other modules such as the auth server or the boss process",
+ "item_format": "date-time"
+ },
+ {
+ "item_name": "timestamp",
+ "item_type": "real",
+ "item_optional": false,
+ "item_default": 0.0,
+ "item_title": "Timestamp",
+ "item_description": "A current time stamp since epoch time (1970-01-01T00:00:00Z)"
+ },
+ {
+ "item_name": "lname",
+ "item_type": "string",
+ "item_optional": false,
+ "item_default": "",
+ "item_title": "Local Name",
+ "item_description": "A localname of stats module given via CC protocol"
+ }
]
}
}
diff --git a/src/bin/stats/tests/isc/config/ccsession.py b/src/bin/stats/tests/isc/config/ccsession.py
index a4e9c37..50f7c1b 100644
--- a/src/bin/stats/tests/isc/config/ccsession.py
+++ b/src/bin/stats/tests/isc/config/ccsession.py
@@ -23,6 +23,7 @@ external module.
import json
import os
+import time
from isc.cc.session import Session
COMMAND_CONFIG_UPDATE = "config_update"
@@ -72,6 +73,9 @@ class ModuleSpecError(Exception):
class ModuleSpec:
def __init__(self, module_spec, check = True):
+        # check only config_data for testing
+ if check and "config_data" in module_spec:
+ _check_config_spec(module_spec["config_data"])
self._module_spec = module_spec
def get_config_spec(self):
@@ -83,6 +87,91 @@ class ModuleSpec:
def get_module_name(self):
return self._module_spec['module_name']
+def _check_config_spec(config_data):
+ # config data is a list of items represented by dicts that contain
+ # things like "item_name", depending on the type they can have
+ # specific subitems
+ """Checks a list that contains the configuration part of the
+ specification. Raises a ModuleSpecError if there is a
+ problem."""
+ if type(config_data) != list:
+ raise ModuleSpecError("config_data is of type " + str(type(config_data)) + ", not a list of items")
+ for config_item in config_data:
+ _check_item_spec(config_item)
+
+def _check_item_spec(config_item):
+ """Checks the dict that defines one config item
+ (i.e. containing "item_name", "item_type", etc.
+ Raises a ModuleSpecError if there is an error"""
+ if type(config_item) != dict:
+ raise ModuleSpecError("item spec not a dict")
+ if "item_name" not in config_item:
+ raise ModuleSpecError("no item_name in config item")
+ if type(config_item["item_name"]) != str:
+ raise ModuleSpecError("item_name is not a string: " + str(config_item["item_name"]))
+ item_name = config_item["item_name"]
+ if "item_type" not in config_item:
+ raise ModuleSpecError("no item_type in config item")
+ item_type = config_item["item_type"]
+ if type(item_type) != str:
+ raise ModuleSpecError("item_type in " + item_name + " is not a string: " + str(type(item_type)))
+ if item_type not in ["integer", "real", "boolean", "string", "list", "map", "any"]:
+ raise ModuleSpecError("unknown item_type in " + item_name + ": " + item_type)
+ if "item_optional" in config_item:
+ if type(config_item["item_optional"]) != bool:
+            raise ModuleSpecError("item_optional in " + item_name + " is not a boolean")
+ if not config_item["item_optional"] and "item_default" not in config_item:
+ raise ModuleSpecError("no default value for non-optional item " + item_name)
+ else:
+ raise ModuleSpecError("item_optional not in item " + item_name)
+ if "item_default" in config_item:
+ item_default = config_item["item_default"]
+ if (item_type == "integer" and type(item_default) != int) or \
+ (item_type == "real" and type(item_default) != float) or \
+ (item_type == "boolean" and type(item_default) != bool) or \
+ (item_type == "string" and type(item_default) != str) or \
+ (item_type == "list" and type(item_default) != list) or \
+ (item_type == "map" and type(item_default) != dict):
+ raise ModuleSpecError("Wrong type for item_default in " + item_name)
+ # TODO: once we have check_type, run the item default through that with the list|map_item_spec
+ if item_type == "list":
+ if "list_item_spec" not in config_item:
+ raise ModuleSpecError("no list_item_spec in list item " + item_name)
+ if type(config_item["list_item_spec"]) != dict:
+ raise ModuleSpecError("list_item_spec in " + item_name + " is not a dict")
+ _check_item_spec(config_item["list_item_spec"])
+ if item_type == "map":
+ if "map_item_spec" not in config_item:
+            raise ModuleSpecError("no map_item_spec in map item " + item_name)
+ if type(config_item["map_item_spec"]) != list:
+ raise ModuleSpecError("map_item_spec in " + item_name + " is not a list")
+ for map_item in config_item["map_item_spec"]:
+ if type(map_item) != dict:
+ raise ModuleSpecError("map_item_spec element is not a dict")
+ _check_item_spec(map_item)
+ if 'item_format' in config_item and 'item_default' in config_item:
+ item_format = config_item["item_format"]
+ item_default = config_item["item_default"]
+ if not _check_format(item_default, item_format):
+ raise ModuleSpecError(
+ "Wrong format for " + str(item_default) + " in " + str(item_name))
+
+def _check_format(value, format_name):
+    """Check if the specified value and format are correct. Return True
+    if they are correct."""
+    # TODO: other format types should be added if necessary
+ time_formats = { 'date-time' : "%Y-%m-%dT%H:%M:%SZ",
+ 'date' : "%Y-%m-%d",
+ 'time' : "%H:%M:%S" }
+ for fmt in time_formats:
+ if format_name == fmt:
+ try:
+ time.strptime(value, time_formats[fmt])
+ return True
+ except (ValueError, TypeError):
+ break
+ return False
+
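+# Illustrative note (added for clarity, not part of the original module):
+# _check_format() pairs a value with a named strptime() pattern, so
+# _check_format("1970-01-01T00:00:00Z", "date-time") returns True, while
+# _check_format("1970-01-01", "date-time") returns False.
+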
class ModuleCCSessionError(Exception):
pass
diff --git a/src/bin/xfrin/b10-xfrin.xml b/src/bin/xfrin/b10-xfrin.xml
index ea4c724..a8fe425 100644
--- a/src/bin/xfrin/b10-xfrin.xml
+++ b/src/bin/xfrin/b10-xfrin.xml
@@ -103,6 +103,7 @@ in separate zonemgr process.
<command>b10-xfrin</command> daemon.
The list items are:
<varname>name</varname> (the zone name),
+ <varname>class</varname> (defaults to <quote>IN</quote>),
<varname>master_addr</varname> (the zone master to transfer from),
<varname>master_port</varname> (defaults to 53), and
<varname>tsig_key</varname> (optional TSIG key to use).
diff --git a/src/bin/xfrout/b10-xfrout.xml b/src/bin/xfrout/b10-xfrout.xml
index ad71fe2..9889b80 100644
--- a/src/bin/xfrout/b10-xfrout.xml
+++ b/src/bin/xfrout/b10-xfrout.xml
@@ -134,6 +134,14 @@
data storage types.
</simpara></note>
+
+<!--
+
+tsig_key_ring list of
+tsig_key string
+
+-->
+
<!-- TODO: formating -->
<para>
The configuration commands are:
diff --git a/src/lib/cache/cache_messages.mes b/src/lib/cache/cache_messages.mes
index 2a68cc2..7f593ec 100644
--- a/src/lib/cache/cache_messages.mes
+++ b/src/lib/cache/cache_messages.mes
@@ -124,7 +124,7 @@ the message will not be cached.
Debug message. The requested data was found in the RRset cache. However, it is
expired, so the cache removed it and is going to pretend nothing was found.
-% CACHE_RRSET_INIT initializing RRset cache for %2 RRsets of class %1
+% CACHE_RRSET_INIT initializing RRset cache for %1 RRsets of class %2
Debug message. The RRset cache to hold at most this many RRsets for the given
class is being created.
diff --git a/src/lib/cc/session.cc b/src/lib/cc/session.cc
index 97d5cf1..e0e24cf 100644
--- a/src/lib/cc/session.cc
+++ b/src/lib/cc/session.cc
@@ -119,7 +119,7 @@ private:
void
SessionImpl::establish(const char& socket_file) {
try {
- LOG_DEBUG(logger, DBG_TRACE_BASIC, CC_ESTABLISH).arg(socket_file);
+ LOG_DEBUG(logger, DBG_TRACE_BASIC, CC_ESTABLISH).arg(&socket_file);
socket_.connect(asio::local::stream_protocol::endpoint(&socket_file),
error_);
LOG_DEBUG(logger, DBG_TRACE_BASIC, CC_ESTABLISHED);
diff --git a/src/lib/config/module_spec.cc b/src/lib/config/module_spec.cc
index 306c795..bebe695 100644
--- a/src/lib/config/module_spec.cc
+++ b/src/lib/config/module_spec.cc
@@ -1,4 +1,4 @@
-// Copyright (C) 2010 Internet Systems Consortium.
+// Copyright (C) 2010, 2011 Internet Systems Consortium.
//
// Permission to use, copy, modify, and distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
@@ -87,6 +87,61 @@ check_config_item_list(ConstElementPtr spec) {
}
}
+// checks whether the given value is valid for the given format name
+// (e.g. "date-time"); returns false if the value does not match the format
+bool
+check_format(ConstElementPtr value, ConstElementPtr format_name) {
+ typedef std::map<std::string, std::string> format_types;
+ format_types time_formats;
+    // TODO: other format types should be added if necessary
+ time_formats.insert(
+ format_types::value_type("date-time", "%Y-%m-%dT%H:%M:%SZ") );
+ time_formats.insert(
+ format_types::value_type("date", "%Y-%m-%d") );
+ time_formats.insert(
+ format_types::value_type("time", "%H:%M:%S") );
+ BOOST_FOREACH (const format_types::value_type& f, time_formats) {
+ if (format_name->stringValue() == f.first) {
+ struct tm tm;
+ std::vector<char> buf(32);
+ memset(&tm, 0, sizeof(tm));
+ // reverse check
+ return (strptime(value->stringValue().c_str(),
+ f.second.c_str(), &tm) != NULL
+ && strftime(&buf[0], buf.size(),
+ f.second.c_str(), &tm) != 0
+ && strncmp(value->stringValue().c_str(),
+ &buf[0], buf.size()) == 0);
+ }
+ }
+ return (false);
+}
+
+void check_statistics_item_list(ConstElementPtr spec);
+
+void
+check_statistics_item_list(ConstElementPtr spec) {
+ if (spec->getType() != Element::list) {
+ throw ModuleSpecError("statistics is not a list of elements");
+ }
+ BOOST_FOREACH(ConstElementPtr item, spec->listValue()) {
+ check_config_item(item);
+ // additional checks for statistics
+ check_leaf_item(item, "item_title", Element::string, true);
+ check_leaf_item(item, "item_description", Element::string, true);
+ check_leaf_item(item, "item_format", Element::string, false);
+        // check that item_default is valid for the named item_format
+ if (item->contains("item_format")
+ && item->contains("item_default")) {
+ if(!check_format(item->get("item_default"),
+ item->get("item_format"))) {
+ throw ModuleSpecError(
+ "item_default not valid type of item_format");
+ }
+ }
+ }
+}
+
void
check_command(ConstElementPtr spec) {
check_leaf_item(spec, "command_name", Element::string, true);
@@ -116,6 +171,9 @@ check_data_specification(ConstElementPtr spec) {
if (spec->contains("commands")) {
check_command_list(spec->get("commands"));
}
+ if (spec->contains("statistics")) {
+ check_statistics_item_list(spec->get("statistics"));
+ }
}
// checks whether the given element is a valid module specification
@@ -165,6 +223,15 @@ ModuleSpec::getConfigSpec() const {
}
}
+ConstElementPtr
+ModuleSpec::getStatisticsSpec() const {
+ if (module_specification->contains("statistics")) {
+ return (module_specification->get("statistics"));
+ } else {
+ return (ElementPtr());
+ }
+}
+
const std::string
ModuleSpec::getModuleName() const {
return (module_specification->get("module_name")->stringValue());
@@ -186,6 +253,12 @@ ModuleSpec::validateConfig(ConstElementPtr data, const bool full) const {
}
bool
+ModuleSpec::validateStatistics(ConstElementPtr data, const bool full) const {
+ ConstElementPtr spec = module_specification->find("statistics");
+ return (validateSpecList(spec, data, full, ElementPtr()));
+}
+
+bool
ModuleSpec::validateCommand(const std::string& command,
ConstElementPtr args,
ElementPtr errors) const
@@ -223,6 +296,14 @@ ModuleSpec::validateConfig(ConstElementPtr data, const bool full,
return (validateSpecList(spec, data, full, errors));
}
+bool
+ModuleSpec::validateStatistics(ConstElementPtr data, const bool full,
+ ElementPtr errors) const
+{
+ ConstElementPtr spec = module_specification->find("statistics");
+ return (validateSpecList(spec, data, full, errors));
+}
+
ModuleSpec
moduleSpecFromFile(const std::string& file_name, const bool check)
throw(JSONError, ModuleSpecError)
@@ -343,6 +424,14 @@ ModuleSpec::validateItem(ConstElementPtr spec, ConstElementPtr data,
}
}
}
+ if (spec->contains("item_format")) {
+ if (!check_format(data, spec->get("item_format"))) {
+ if (errors) {
+ errors->add(Element::create("Format mismatch"));
+ }
+ return (false);
+ }
+ }
return (true);
}
diff --git a/src/lib/config/module_spec.h b/src/lib/config/module_spec.h
index ab6e273..ce3762f 100644
--- a/src/lib/config/module_spec.h
+++ b/src/lib/config/module_spec.h
@@ -1,4 +1,4 @@
-// Copyright (C) 2010 Internet Systems Consortium.
+// Copyright (C) 2010, 2011 Internet Systems Consortium.
//
// Permission to use, copy, modify, and distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
@@ -71,6 +71,12 @@ namespace isc { namespace config {
/// part of the specification
isc::data::ConstElementPtr getConfigSpec() const;
+ /// Returns the statistics part of the specification as an
+ /// ElementPtr
+ /// \return ElementPtr Shared pointer to the statistics
+ /// part of the specification
+ isc::data::ConstElementPtr getStatisticsSpec() const;
+
/// Returns the full module specification as an ElementPtr
/// \return ElementPtr Shared pointer to the specification
isc::data::ConstElementPtr getFullSpec() const {
@@ -95,6 +101,17 @@ namespace isc { namespace config {
bool validateConfig(isc::data::ConstElementPtr data,
const bool full = false) const;
+    // returns true if the given statistics data conforms to this
+    // statistics specification
+ /// Validates the given statistics data for this specification.
+ /// \param data The base \c Element of the data to check
+ /// \param full If true, all non-optional statistics parameters
+ /// must be specified.
+ /// \return true if the data conforms to the specification,
+ /// false otherwise.
+ bool validateStatistics(isc::data::ConstElementPtr data,
+ const bool full = false) const;
+
/// Validates the arguments for the given command
///
/// This checks the command and argument against the
@@ -142,6 +159,10 @@ namespace isc { namespace config {
bool validateConfig(isc::data::ConstElementPtr data, const bool full,
isc::data::ElementPtr errors) const;
+ /// errors must be of type ListElement
+ bool validateStatistics(isc::data::ConstElementPtr data, const bool full,
+ isc::data::ElementPtr errors) const;
+
private:
bool validateItem(isc::data::ConstElementPtr spec,
isc::data::ConstElementPtr data,
diff --git a/src/lib/config/tests/ccsession_unittests.cc b/src/lib/config/tests/ccsession_unittests.cc
index 5ea4f32..793fa30 100644
--- a/src/lib/config/tests/ccsession_unittests.cc
+++ b/src/lib/config/tests/ccsession_unittests.cc
@@ -184,7 +184,7 @@ TEST_F(CCSessionTest, session2) {
ConstElementPtr msg;
std::string group, to;
msg = session.getFirstMessage(group, to);
- EXPECT_EQ("{ \"command\": [ \"module_spec\", { \"commands\": [ { \"command_args\": [ { \"item_default\": \"\", \"item_name\": \"message\", \"item_optional\": false, \"item_type\": \"string\" } ], \"command_description\": \"Print the given message to stdout\", \"command_name\": \"print_message\" }, { \"command_args\": [ ], \"command_description\": \"Shut down BIND 10\", \"command_name\": \"shutdown\" } ], \"config_data\": [ { \"item_default\": 1, \"item_name\": \"item1\", \"item_optional\": false, \"item_type\": \"integer\" }, { \"item_default\": 1.1, \"item_name\": \"item2\", \"item_optional\": false, \"item_type\": \"real\" }, { \"item_default\": true, \"item_name\": \"item3\", \"item_optional\": false, \"item_type\": \"boolean\" }, { \"item_default\": \"test\", \"item_name\": \"item4\", \"item_optional\": false, \"item_type\": \"string\" }, { \"item_default\": [ \"a\", \"b\" ], \"item_name\": \"item5\", \"item_optional\": false, \"item_type\": \"list\", \"list_item_sp
ec\": { \"item_default\": \"\", \"item_name\": \"list_element\", \"item_optional\": false, \"item_type\": \"string\" } }, { \"item_default\": { }, \"item_name\": \"item6\", \"item_optional\": false, \"item_type\": \"map\", \"map_item_spec\": [ { \"item_default\": \"default\", \"item_name\": \"value1\", \"item_optional\": true, \"item_type\": \"string\" }, { \"item_name\": \"value2\", \"item_optional\": true, \"item_type\": \"integer\" } ] } ], \"module_name\": \"Spec2\" } ] }", msg->str());
+ EXPECT_EQ("{ \"command\": [ \"module_spec\", { \"commands\": [ { \"command_args\": [ { \"item_default\": \"\", \"item_name\": \"message\", \"item_optional\": false, \"item_type\": \"string\" } ], \"command_description\": \"Print the given message to stdout\", \"command_name\": \"print_message\" }, { \"command_args\": [ ], \"command_description\": \"Shut down BIND 10\", \"command_name\": \"shutdown\" } ], \"config_data\": [ { \"item_default\": 1, \"item_name\": \"item1\", \"item_optional\": false, \"item_type\": \"integer\" }, { \"item_default\": 1.1, \"item_name\": \"item2\", \"item_optional\": false, \"item_type\": \"real\" }, { \"item_default\": true, \"item_name\": \"item3\", \"item_optional\": false, \"item_type\": \"boolean\" }, { \"item_default\": \"test\", \"item_name\": \"item4\", \"item_optional\": false, \"item_type\": \"string\" }, { \"item_default\": [ \"a\", \"b\" ], \"item_name\": \"item5\", \"item_optional\": false, \"item_type\": \"list\", \"list_item_sp
ec\": { \"item_default\": \"\", \"item_name\": \"list_element\", \"item_optional\": false, \"item_type\": \"string\" } }, { \"item_default\": { }, \"item_name\": \"item6\", \"item_optional\": false, \"item_type\": \"map\", \"map_item_spec\": [ { \"item_default\": \"default\", \"item_name\": \"value1\", \"item_optional\": true, \"item_type\": \"string\" }, { \"item_name\": \"value2\", \"item_optional\": true, \"item_type\": \"integer\" } ] } ], \"module_name\": \"Spec2\", \"statistics\": [ { \"item_default\": \"1970-01-01T00:00:00Z\", \"item_description\": \"A dummy date time\", \"item_format\": \"date-time\", \"item_name\": \"dummy_time\", \"item_optional\": false, \"item_title\": \"Dummy Time\", \"item_type\": \"string\" } ] } ] }", msg->str());
EXPECT_EQ("ConfigManager", group);
EXPECT_EQ("*", to);
EXPECT_EQ(0, session.getMsgQueue()->size());
@@ -231,7 +231,7 @@ TEST_F(CCSessionTest, session3) {
ConstElementPtr msg;
std::string group, to;
msg = session.getFirstMessage(group, to);
- EXPECT_EQ("{ \"command\": [ \"module_spec\", { \"commands\": [ { \"command_args\": [ { \"item_default\": \"\", \"item_name\": \"message\", \"item_optional\": false, \"item_type\": \"string\" } ], \"command_description\": \"Print the given message to stdout\", \"command_name\": \"print_message\" }, { \"command_args\": [ ], \"command_description\": \"Shut down BIND 10\", \"command_name\": \"shutdown\" } ], \"config_data\": [ { \"item_default\": 1, \"item_name\": \"item1\", \"item_optional\": false, \"item_type\": \"integer\" }, { \"item_default\": 1.1, \"item_name\": \"item2\", \"item_optional\": false, \"item_type\": \"real\" }, { \"item_default\": true, \"item_name\": \"item3\", \"item_optional\": false, \"item_type\": \"boolean\" }, { \"item_default\": \"test\", \"item_name\": \"item4\", \"item_optional\": false, \"item_type\": \"string\" }, { \"item_default\": [ \"a\", \"b\" ], \"item_name\": \"item5\", \"item_optional\": false, \"item_type\": \"list\", \"list_item_sp
ec\": { \"item_default\": \"\", \"item_name\": \"list_element\", \"item_optional\": false, \"item_type\": \"string\" } }, { \"item_default\": { }, \"item_name\": \"item6\", \"item_optional\": false, \"item_type\": \"map\", \"map_item_spec\": [ { \"item_default\": \"default\", \"item_name\": \"value1\", \"item_optional\": true, \"item_type\": \"string\" }, { \"item_name\": \"value2\", \"item_optional\": true, \"item_type\": \"integer\" } ] } ], \"module_name\": \"Spec2\" } ] }", msg->str());
+ EXPECT_EQ("{ \"command\": [ \"module_spec\", { \"commands\": [ { \"command_args\": [ { \"item_default\": \"\", \"item_name\": \"message\", \"item_optional\": false, \"item_type\": \"string\" } ], \"command_description\": \"Print the given message to stdout\", \"command_name\": \"print_message\" }, { \"command_args\": [ ], \"command_description\": \"Shut down BIND 10\", \"command_name\": \"shutdown\" } ], \"config_data\": [ { \"item_default\": 1, \"item_name\": \"item1\", \"item_optional\": false, \"item_type\": \"integer\" }, { \"item_default\": 1.1, \"item_name\": \"item2\", \"item_optional\": false, \"item_type\": \"real\" }, { \"item_default\": true, \"item_name\": \"item3\", \"item_optional\": false, \"item_type\": \"boolean\" }, { \"item_default\": \"test\", \"item_name\": \"item4\", \"item_optional\": false, \"item_type\": \"string\" }, { \"item_default\": [ \"a\", \"b\" ], \"item_name\": \"item5\", \"item_optional\": false, \"item_type\": \"list\", \"list_item_sp
ec\": { \"item_default\": \"\", \"item_name\": \"list_element\", \"item_optional\": false, \"item_type\": \"string\" } }, { \"item_default\": { }, \"item_name\": \"item6\", \"item_optional\": false, \"item_type\": \"map\", \"map_item_spec\": [ { \"item_default\": \"default\", \"item_name\": \"value1\", \"item_optional\": true, \"item_type\": \"string\" }, { \"item_name\": \"value2\", \"item_optional\": true, \"item_type\": \"integer\" } ] } ], \"module_name\": \"Spec2\", \"statistics\": [ { \"item_default\": \"1970-01-01T00:00:00Z\", \"item_description\": \"A dummy date time\", \"item_format\": \"date-time\", \"item_name\": \"dummy_time\", \"item_optional\": false, \"item_title\": \"Dummy Time\", \"item_type\": \"string\" } ] } ] }", msg->str());
EXPECT_EQ("ConfigManager", group);
EXPECT_EQ("*", to);
EXPECT_EQ(1, session.getMsgQueue()->size());
diff --git a/src/lib/config/tests/module_spec_unittests.cc b/src/lib/config/tests/module_spec_unittests.cc
index d642af8..b2ca7b4 100644
--- a/src/lib/config/tests/module_spec_unittests.cc
+++ b/src/lib/config/tests/module_spec_unittests.cc
@@ -1,4 +1,4 @@
-// Copyright (C) 2009 Internet Systems Consortium, Inc. ("ISC")
+// Copyright (C) 2009, 2011 Internet Systems Consortium, Inc. ("ISC")
//
// Permission to use, copy, modify, and/or distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
@@ -18,6 +18,8 @@
#include <fstream>
+#include <boost/foreach.hpp>
+
#include <config/tests/data_def_unittests_config.h>
using namespace isc::data;
@@ -57,6 +59,7 @@ TEST(ModuleSpec, ReadingSpecfiles) {
dd = moduleSpecFromFile(specfile("spec2.spec"));
EXPECT_EQ("[ { \"command_args\": [ { \"item_default\": \"\", \"item_name\": \"message\", \"item_optional\": false, \"item_type\": \"string\" } ], \"command_description\": \"Print the given message to stdout\", \"command_name\": \"print_message\" }, { \"command_args\": [ ], \"command_description\": \"Shut down BIND 10\", \"command_name\": \"shutdown\" } ]", dd.getCommandsSpec()->str());
+ EXPECT_EQ("[ { \"item_default\": \"1970-01-01T00:00:00Z\", \"item_description\": \"A dummy date time\", \"item_format\": \"date-time\", \"item_name\": \"dummy_time\", \"item_optional\": false, \"item_title\": \"Dummy Time\", \"item_type\": \"string\" } ]", dd.getStatisticsSpec()->str());
EXPECT_EQ("Spec2", dd.getModuleName());
EXPECT_EQ("", dd.getModuleDescription());
@@ -64,6 +67,11 @@ TEST(ModuleSpec, ReadingSpecfiles) {
EXPECT_EQ("Spec25", dd.getModuleName());
EXPECT_EQ("Just an empty module", dd.getModuleDescription());
EXPECT_THROW(moduleSpecFromFile(specfile("spec26.spec")), ModuleSpecError);
+ EXPECT_THROW(moduleSpecFromFile(specfile("spec34.spec")), ModuleSpecError);
+ EXPECT_THROW(moduleSpecFromFile(specfile("spec35.spec")), ModuleSpecError);
+ EXPECT_THROW(moduleSpecFromFile(specfile("spec36.spec")), ModuleSpecError);
+ EXPECT_THROW(moduleSpecFromFile(specfile("spec37.spec")), ModuleSpecError);
+ EXPECT_THROW(moduleSpecFromFile(specfile("spec38.spec")), ModuleSpecError);
std::ifstream file;
file.open(specfile("spec1.spec").c_str());
@@ -71,6 +79,7 @@ TEST(ModuleSpec, ReadingSpecfiles) {
EXPECT_EQ(dd.getFullSpec()->get("module_name")
->stringValue(), "Spec1");
EXPECT_TRUE(isNull(dd.getCommandsSpec()));
+ EXPECT_TRUE(isNull(dd.getStatisticsSpec()));
std::ifstream file2;
file2.open(specfile("spec8.spec").c_str());
@@ -114,6 +123,12 @@ TEST(ModuleSpec, SpecfileConfigData) {
"commands is not a list of elements");
}
+TEST(ModuleSpec, SpecfileStatistics) {
+ moduleSpecError("spec36.spec", "item_default not valid type of item_format");
+ moduleSpecError("spec37.spec", "statistics is not a list of elements");
+ moduleSpecError("spec38.spec", "item_default not valid type of item_format");
+}
+
TEST(ModuleSpec, SpecfileCommands) {
moduleSpecError("spec17.spec",
"command_name missing in { \"command_args\": [ { \"item_default\": \"\", \"item_name\": \"message\", \"item_optional\": false, \"item_type\": \"string\" } ], \"command_description\": \"Print the given message to stdout\" }");
@@ -137,6 +152,17 @@ dataTest(const ModuleSpec& dd, const std::string& data_file_name) {
}
bool
+statisticsTest(const ModuleSpec& dd, const std::string& data_file_name) {
+ std::ifstream data_file;
+
+ data_file.open(specfile(data_file_name).c_str());
+ ConstElementPtr data = Element::fromJSON(data_file, data_file_name);
+ data_file.close();
+
+ return (dd.validateStatistics(data));
+}
+
+bool
dataTestWithErrors(const ModuleSpec& dd, const std::string& data_file_name,
ElementPtr errors)
{
@@ -149,6 +175,19 @@ dataTestWithErrors(const ModuleSpec& dd, const std::string& data_file_name,
return (dd.validateConfig(data, true, errors));
}
+bool
+statisticsTestWithErrors(const ModuleSpec& dd, const std::string& data_file_name,
+ ElementPtr errors)
+{
+ std::ifstream data_file;
+
+ data_file.open(specfile(data_file_name).c_str());
+ ConstElementPtr data = Element::fromJSON(data_file, data_file_name);
+ data_file.close();
+
+ return (dd.validateStatistics(data, true, errors));
+}
+
TEST(ModuleSpec, DataValidation) {
ModuleSpec dd = moduleSpecFromFile(specfile("spec22.spec"));
@@ -175,6 +214,17 @@ TEST(ModuleSpec, DataValidation) {
EXPECT_EQ("[ \"Unknown item value_does_not_exist\" ]", errors->str());
}
+TEST(ModuleSpec, StatisticsValidation) {
+ ModuleSpec dd = moduleSpecFromFile(specfile("spec33.spec"));
+
+ EXPECT_TRUE(statisticsTest(dd, "data33_1.data"));
+ EXPECT_FALSE(statisticsTest(dd, "data33_2.data"));
+
+ ElementPtr errors = Element::createList();
+ EXPECT_FALSE(statisticsTestWithErrors(dd, "data33_2.data", errors));
+ EXPECT_EQ("[ \"Format mismatch\", \"Format mismatch\", \"Format mismatch\" ]", errors->str());
+}
+
TEST(ModuleSpec, CommandValidation) {
ModuleSpec dd = moduleSpecFromFile(specfile("spec2.spec"));
ConstElementPtr arg = Element::fromJSON("{}");
@@ -220,3 +270,109 @@ TEST(ModuleSpec, NamedSetValidation) {
EXPECT_FALSE(dataTest(dd, "data32_2.data"));
EXPECT_FALSE(dataTest(dd, "data32_3.data"));
}
+
+TEST(ModuleSpec, CheckFormat) {
+
+ const std::string json_begin = "{ \"module_spec\": { \"module_name\": \"Foo\", \"statistics\": [ { \"item_name\": \"dummy_time\", \"item_type\": \"string\", \"item_optional\": true, \"item_title\": \"Dummy Time\", \"item_description\": \"A dummy date time\"";
+ const std::string json_end = " } ] } }";
+ std::string item_default;
+ std::string item_format;
+ std::vector<std::string> specs;
+ ConstElementPtr el;
+
+ specs.clear();
+ item_default = "\"item_default\": \"2011-05-27T19:42:57Z\",";
+ item_format = "\"item_format\": \"date-time\"";
+ specs.push_back("," + item_default + item_format);
+ item_default = "\"item_default\": \"2011-05-27\",";
+ item_format = "\"item_format\": \"date\"";
+ specs.push_back("," + item_default + item_format);
+ item_default = "\"item_default\": \"19:42:57\",";
+ item_format = "\"item_format\": \"time\"";
+ specs.push_back("," + item_default + item_format);
+
+ item_format = "\"item_format\": \"date-time\"";
+ specs.push_back("," + item_format);
+ item_default = "";
+ item_format = "\"item_format\": \"date\"";
+ specs.push_back("," + item_format);
+ item_default = "";
+ item_format = "\"item_format\": \"time\"";
+ specs.push_back("," + item_format);
+
+ item_default = "\"item_default\": \"a\"";
+ specs.push_back("," + item_default);
+ item_default = "\"item_default\": \"b\"";
+ specs.push_back("," + item_default);
+ item_default = "\"item_default\": \"c\"";
+ specs.push_back("," + item_default);
+
+ item_format = "\"item_format\": \"dummy\"";
+ specs.push_back("," + item_format);
+
+ specs.push_back("");
+
+ BOOST_FOREACH(std::string s, specs) {
+ el = Element::fromJSON(json_begin + s + json_end)->get("module_spec");
+ EXPECT_NO_THROW(ModuleSpec(el, true));
+ }
+
+ specs.clear();
+ item_default = "\"item_default\": \"2011-05-27T19:42:57Z\",";
+ item_format = "\"item_format\": \"dummy\"";
+ specs.push_back("," + item_default + item_format);
+ item_default = "\"item_default\": \"2011-05-27\",";
+ item_format = "\"item_format\": \"dummy\"";
+ specs.push_back("," + item_default + item_format);
+ item_default = "\"item_default\": \"19:42:57Z\",";
+ item_format = "\"item_format\": \"dummy\"";
+ specs.push_back("," + item_default + item_format);
+
+ item_default = "\"item_default\": \"2011-13-99T99:99:99Z\",";
+ item_format = "\"item_format\": \"date-time\"";
+ specs.push_back("," + item_default + item_format);
+ item_default = "\"item_default\": \"2011-13-99\",";
+ item_format = "\"item_format\": \"date\"";
+ specs.push_back("," + item_default + item_format);
+ item_default = "\"item_default\": \"99:99:99Z\",";
+ item_format = "\"item_format\": \"time\"";
+ specs.push_back("," + item_default + item_format);
+
+ item_default = "\"item_default\": \"1\",";
+ item_format = "\"item_format\": \"date-time\"";
+ specs.push_back("," + item_default + item_format);
+ item_default = "\"item_default\": \"1\",";
+ item_format = "\"item_format\": \"date\"";
+ specs.push_back("," + item_default + item_format);
+ item_default = "\"item_default\": \"1\",";
+ item_format = "\"item_format\": \"time\"";
+ specs.push_back("," + item_default + item_format);
+
+ item_default = "\"item_default\": \"\",";
+ item_format = "\"item_format\": \"date-time\"";
+ specs.push_back("," + item_default + item_format);
+ item_default = "\"item_default\": \"\",";
+ item_format = "\"item_format\": \"date\"";
+ specs.push_back("," + item_default + item_format);
+ item_default = "\"item_default\": \"\",";
+ item_format = "\"item_format\": \"time\"";
+ specs.push_back("," + item_default + item_format);
+
+ // wrong date-time-type format not ending with "Z"
+ item_default = "\"item_default\": \"2011-05-27T19:42:57\",";
+ item_format = "\"item_format\": \"date-time\"";
+ specs.push_back("," + item_default + item_format);
+ // wrong date-type format ending with "T"
+ item_default = "\"item_default\": \"2011-05-27T\",";
+ item_format = "\"item_format\": \"date\"";
+ specs.push_back("," + item_default + item_format);
+ // wrong time-type format ending with "Z"
+ item_default = "\"item_default\": \"19:42:57Z\",";
+ item_format = "\"item_format\": \"time\"";
+ specs.push_back("," + item_default + item_format);
+
+ BOOST_FOREACH(std::string s, specs) {
+ el = Element::fromJSON(json_begin + s + json_end)->get("module_spec");
+ EXPECT_THROW(ModuleSpec(el, true), ModuleSpecError);
+ }
+}
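
For reference, a minimal sketch (not part of the patch) of how statistics data can be validated against a module spec, mirroring the statisticsTestWithErrors() helper added above. The header paths (<config/module_spec.h>, <cc/data.h>) and the isc::config namespace are assumptions based on the existing config library layout; the JSON payload is the same as the new data33_1.data.

    // Validation sketch; it follows the API used by the tests above, but the
    // header paths and namespaces are assumptions.
    #include <config/module_spec.h>
    #include <cc/data.h>

    #include <iostream>

    using namespace isc::config;
    using namespace isc::data;

    int main() {
        // spec33.spec declares five dummy statistics items, three of them
        // with item_format values of date-time, date and time.
        ModuleSpec spec = moduleSpecFromFile("spec33.spec");

        // Same payload as data33_1.data: every value matches its format.
        ConstElementPtr stats = Element::fromJSON(
            "{ \"dummy_str\": \"Dummy String\", \"dummy_int\": 118,"
            "  \"dummy_datetime\": \"2011-05-27T19:42:57Z\","
            "  \"dummy_date\": \"2011-05-27\", \"dummy_time\": \"19:42:57\" }");

        // Collect per-item messages (e.g. "Format mismatch") on failure.
        ElementPtr errors = Element::createList();
        if (!spec.validateStatistics(stats, true, errors)) {
            std::cout << "validation failed: " << errors->str() << std::endl;
        }
        return (0);
    }
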
diff --git a/src/lib/config/tests/testdata/Makefile.am b/src/lib/config/tests/testdata/Makefile.am
index 91d7f04..0d8b92e 100644
--- a/src/lib/config/tests/testdata/Makefile.am
+++ b/src/lib/config/tests/testdata/Makefile.am
@@ -25,6 +25,8 @@ EXTRA_DIST += data22_10.data
EXTRA_DIST += data32_1.data
EXTRA_DIST += data32_2.data
EXTRA_DIST += data32_3.data
+EXTRA_DIST += data33_1.data
+EXTRA_DIST += data33_2.data
EXTRA_DIST += spec1.spec
EXTRA_DIST += spec2.spec
EXTRA_DIST += spec3.spec
@@ -57,3 +59,9 @@ EXTRA_DIST += spec29.spec
EXTRA_DIST += spec30.spec
EXTRA_DIST += spec31.spec
EXTRA_DIST += spec32.spec
+EXTRA_DIST += spec33.spec
+EXTRA_DIST += spec34.spec
+EXTRA_DIST += spec35.spec
+EXTRA_DIST += spec36.spec
+EXTRA_DIST += spec37.spec
+EXTRA_DIST += spec38.spec
diff --git a/src/lib/config/tests/testdata/data33_1.data b/src/lib/config/tests/testdata/data33_1.data
new file mode 100644
index 0000000..429852c
--- /dev/null
+++ b/src/lib/config/tests/testdata/data33_1.data
@@ -0,0 +1,7 @@
+{
+ "dummy_str": "Dummy String",
+ "dummy_int": 118,
+ "dummy_datetime": "2011-05-27T19:42:57Z",
+ "dummy_date": "2011-05-27",
+ "dummy_time": "19:42:57"
+}
diff --git a/src/lib/config/tests/testdata/data33_2.data b/src/lib/config/tests/testdata/data33_2.data
new file mode 100644
index 0000000..eb0615c
--- /dev/null
+++ b/src/lib/config/tests/testdata/data33_2.data
@@ -0,0 +1,7 @@
+{
+ "dummy_str": "Dummy String",
+ "dummy_int": 118,
+ "dummy_datetime": "xxxx",
+ "dummy_date": "xxxx",
+ "dummy_time": "xxxx"
+}
diff --git a/src/lib/config/tests/testdata/spec2.spec b/src/lib/config/tests/testdata/spec2.spec
index 59b8ebc..4352422 100644
--- a/src/lib/config/tests/testdata/spec2.spec
+++ b/src/lib/config/tests/testdata/spec2.spec
@@ -66,6 +66,17 @@
"command_description": "Shut down BIND 10",
"command_args": []
}
+ ],
+ "statistics": [
+ {
+ "item_name": "dummy_time",
+ "item_type": "string",
+ "item_optional": false,
+ "item_default": "1970-01-01T00:00:00Z",
+ "item_title": "Dummy Time",
+ "item_description": "A dummy date time",
+ "item_format": "date-time"
+ }
]
}
}
diff --git a/src/lib/config/tests/testdata/spec33.spec b/src/lib/config/tests/testdata/spec33.spec
new file mode 100644
index 0000000..3002488
--- /dev/null
+++ b/src/lib/config/tests/testdata/spec33.spec
@@ -0,0 +1,50 @@
+{
+ "module_spec": {
+ "module_name": "Spec33",
+ "statistics": [
+ {
+ "item_name": "dummy_str",
+ "item_type": "string",
+ "item_optional": false,
+ "item_default": "Dummy",
+ "item_title": "Dummy String",
+ "item_description": "A dummy string"
+ },
+ {
+ "item_name": "dummy_int",
+ "item_type": "integer",
+ "item_optional": false,
+ "item_default": 0,
+ "item_title": "Dummy Integer",
+ "item_description": "A dummy integer"
+ },
+ {
+ "item_name": "dummy_datetime",
+ "item_type": "string",
+ "item_optional": false,
+ "item_default": "1970-01-01T00:00:00Z",
+ "item_title": "Dummy DateTime",
+ "item_description": "A dummy datetime",
+ "item_format": "date-time"
+ },
+ {
+ "item_name": "dummy_date",
+ "item_type": "string",
+ "item_optional": false,
+ "item_default": "1970-01-01",
+ "item_title": "Dummy Date",
+ "item_description": "A dummy date",
+ "item_format": "date"
+ },
+ {
+ "item_name": "dummy_time",
+ "item_type": "string",
+ "item_optional": false,
+ "item_default": "00:00:00",
+ "item_title": "Dummy Time",
+ "item_description": "A dummy time",
+ "item_format": "time"
+ }
+ ]
+ }
+}
diff --git a/src/lib/config/tests/testdata/spec34.spec b/src/lib/config/tests/testdata/spec34.spec
new file mode 100644
index 0000000..dd1f3ca
--- /dev/null
+++ b/src/lib/config/tests/testdata/spec34.spec
@@ -0,0 +1,14 @@
+{
+ "module_spec": {
+ "module_name": "Spec34",
+ "statistics": [
+ {
+ "item_name": "dummy_str",
+ "item_type": "string",
+ "item_optional": false,
+ "item_default": "Dummy",
+ "item_description": "A dummy string"
+ }
+ ]
+ }
+}
diff --git a/src/lib/config/tests/testdata/spec35.spec b/src/lib/config/tests/testdata/spec35.spec
new file mode 100644
index 0000000..86aaf14
--- /dev/null
+++ b/src/lib/config/tests/testdata/spec35.spec
@@ -0,0 +1,15 @@
+{
+ "module_spec": {
+ "module_name": "Spec35",
+ "statistics": [
+ {
+ "item_name": "dummy_str",
+ "item_type": "string",
+ "item_optional": false,
+ "item_default": "Dummy",
+ "item_title": "Dummy String"
+ }
+ ]
+ }
+}
+
diff --git a/src/lib/config/tests/testdata/spec36.spec b/src/lib/config/tests/testdata/spec36.spec
new file mode 100644
index 0000000..fb9ce26
--- /dev/null
+++ b/src/lib/config/tests/testdata/spec36.spec
@@ -0,0 +1,17 @@
+{
+ "module_spec": {
+ "module_name": "Spec36",
+ "statistics": [
+ {
+ "item_name": "dummy_str",
+ "item_type": "string",
+ "item_optional": false,
+ "item_default": "Dummy",
+ "item_title": "Dummy String",
+ "item_description": "A dummy string",
+ "item_format": "dummy"
+ }
+ ]
+ }
+}
+
diff --git a/src/lib/config/tests/testdata/spec37.spec b/src/lib/config/tests/testdata/spec37.spec
new file mode 100644
index 0000000..bc444d1
--- /dev/null
+++ b/src/lib/config/tests/testdata/spec37.spec
@@ -0,0 +1,7 @@
+{
+ "module_spec": {
+ "module_name": "Spec37",
+ "statistics": 8
+ }
+}
+
diff --git a/src/lib/config/tests/testdata/spec38.spec b/src/lib/config/tests/testdata/spec38.spec
new file mode 100644
index 0000000..1892e88
--- /dev/null
+++ b/src/lib/config/tests/testdata/spec38.spec
@@ -0,0 +1,17 @@
+{
+ "module_spec": {
+ "module_name": "Spec38",
+ "statistics": [
+ {
+ "item_name": "dummy_datetime",
+ "item_type": "string",
+ "item_optional": false,
+ "item_default": "11",
+ "item_title": "Dummy DateTime",
+ "item_description": "A dummy datetime",
+ "item_format": "date-time"
+ }
+ ]
+ }
+}
+
diff --git a/src/lib/datasrc/Makefile.am b/src/lib/datasrc/Makefile.am
index 261baae..db67781 100644
--- a/src/lib/datasrc/Makefile.am
+++ b/src/lib/datasrc/Makefile.am
@@ -22,6 +22,8 @@ libdatasrc_la_SOURCES += zone.h
libdatasrc_la_SOURCES += result.h
libdatasrc_la_SOURCES += logger.h logger.cc
libdatasrc_la_SOURCES += client.h
+libdatasrc_la_SOURCES += database.h database.cc
+libdatasrc_la_SOURCES += sqlite3_accessor.h sqlite3_accessor.cc
nodist_libdatasrc_la_SOURCES = datasrc_messages.h datasrc_messages.cc
libdatasrc_la_LIBADD = $(top_builddir)/src/lib/exceptions/libexceptions.la
diff --git a/src/lib/datasrc/client.h b/src/lib/datasrc/client.h
index a830f00..9fe6519 100644
--- a/src/lib/datasrc/client.h
+++ b/src/lib/datasrc/client.h
@@ -15,6 +15,8 @@
#ifndef __DATA_SOURCE_CLIENT_H
#define __DATA_SOURCE_CLIENT_H 1
+#include <boost/noncopyable.hpp>
+
#include <datasrc/zone.h>
namespace isc {
diff --git a/src/lib/datasrc/database.cc b/src/lib/datasrc/database.cc
new file mode 100644
index 0000000..04fb44c
--- /dev/null
+++ b/src/lib/datasrc/database.cc
@@ -0,0 +1,405 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <vector>
+
+#include <datasrc/database.h>
+
+#include <exceptions/exceptions.h>
+#include <dns/name.h>
+#include <dns/rrttl.h>
+#include <dns/rdata.h>
+#include <dns/rdataclass.h>
+
+#include <datasrc/data_source.h>
+#include <datasrc/logger.h>
+
+#include <boost/foreach.hpp>
+
+using isc::dns::Name;
+
+namespace isc {
+namespace datasrc {
+
+DatabaseClient::DatabaseClient(boost::shared_ptr<DatabaseAccessor>
+ database) :
+ database_(database)
+{
+ if (database_.get() == NULL) {
+ isc_throw(isc::InvalidParameter,
+ "No database provided to DatabaseClient");
+ }
+}
+
+DataSourceClient::FindResult
+DatabaseClient::findZone(const Name& name) const {
+ std::pair<bool, int> zone(database_->getZone(name));
+ // Try exact first
+ if (zone.first) {
+ return (FindResult(result::SUCCESS,
+ ZoneFinderPtr(new Finder(database_,
+ zone.second, name))));
+ }
+ // Then super domains
+ // Start from 1, as 0 is covered above
+ for (size_t i(1); i < name.getLabelCount(); ++i) {
+ isc::dns::Name superdomain(name.split(i));
+ zone = database_->getZone(superdomain);
+ if (zone.first) {
+ return (FindResult(result::PARTIALMATCH,
+ ZoneFinderPtr(new Finder(database_,
+ zone.second,
+ superdomain))));
+ }
+ }
+ // No, really nothing
+ return (FindResult(result::NOTFOUND, ZoneFinderPtr()));
+}
+
+DatabaseClient::Finder::Finder(boost::shared_ptr<DatabaseAccessor>
+ database, int zone_id,
+ const isc::dns::Name& origin) :
+ database_(database),
+ zone_id_(zone_id),
+ origin_(origin)
+{ }
+
+namespace {
+// Adds the given Rdata to the given RRset
+// If the rrset is an empty pointer, a new one is
+// created with the given name, class, type and ttl
+// The type is checked if the rrset exists, but the
+// name is not.
+//
+// Then adds the given rdata to the set
+//
+// Raises a DataSourceError if the type does not
+// match, or if the given rdata string does not
+// parse correctly for the given type and class
+//
+// The DatabaseAccessor is passed to print the
+// database name in the log message if the TTL is
+// modified
+void addOrCreate(isc::dns::RRsetPtr& rrset,
+ const isc::dns::Name& name,
+ const isc::dns::RRClass& cls,
+ const isc::dns::RRType& type,
+ const isc::dns::RRTTL& ttl,
+ const std::string& rdata_str,
+ const DatabaseAccessor& db
+ )
+{
+ if (!rrset) {
+ rrset.reset(new isc::dns::RRset(name, cls, type, ttl));
+ } else {
+ // This is a check to make sure find() is not messing things up
+ assert(type == rrset->getType());
+ if (ttl != rrset->getTTL()) {
+ if (ttl < rrset->getTTL()) {
+ rrset->setTTL(ttl);
+ }
+ logger.info(DATASRC_DATABASE_FIND_TTL_MISMATCH)
+ .arg(db.getDBName()).arg(name).arg(cls)
+ .arg(type).arg(rrset->getTTL());
+ }
+ }
+ try {
+ rrset->addRdata(isc::dns::rdata::createRdata(type, cls, rdata_str));
+ } catch (const isc::dns::rdata::InvalidRdataText& ivrt) {
+ // at this point, rrset may have been initialised for no reason,
+ // and won't be used. But the caller would drop the shared_ptr
+ // on such an error anyway, so we don't care.
+ isc_throw(DataSourceError,
+ "bad rdata in database for " << name << " "
+ << type << ": " << ivrt.what());
+ }
+}
+
+// This class keeps a short-lived store of RRSIG records encountered
+// during a call to find(). If the backend happens to return signatures
+// before the actual data, we might not know which signatures we will need.
+// So if they may be relevant, we store them in this class.
+//
+// (If this class seems useful in other places, we might want to move
+// it to util. That would also provide an opportunity to add unit tests)
+class RRsigStore {
+public:
+ // Adds the given signature Rdata to the store
+ // The signature rdata MUST be of the RRSIG rdata type
+ // (the caller must make sure of this).
+ // NOTE: if we move this class to a public namespace,
+ // we should add a type_covered argument, so as not
+ // to have to do this cast here.
+ void addSig(isc::dns::rdata::RdataPtr sig_rdata) {
+ const isc::dns::RRType& type_covered =
+ static_cast<isc::dns::rdata::generic::RRSIG*>(
+ sig_rdata.get())->typeCovered();
+ sigs[type_covered].push_back(sig_rdata);
+ }
+
+ // If the store contains signatures for the type of the given
+ // rrset, they are appended to it.
+ void appendSignatures(isc::dns::RRsetPtr& rrset) const {
+ std::map<isc::dns::RRType,
+ std::vector<isc::dns::rdata::RdataPtr> >::const_iterator
+ found = sigs.find(rrset->getType());
+ if (found != sigs.end()) {
+ BOOST_FOREACH(isc::dns::rdata::RdataPtr sig, found->second) {
+ rrset->addRRsig(sig);
+ }
+ }
+ }
+
+private:
+ std::map<isc::dns::RRType, std::vector<isc::dns::rdata::RdataPtr> > sigs;
+};
+}
+
+std::pair<bool, isc::dns::RRsetPtr>
+DatabaseClient::Finder::getRRset(const isc::dns::Name& name,
+ const isc::dns::RRType* type,
+ bool want_cname, bool want_dname,
+ bool want_ns)
+{
+ RRsigStore sig_store;
+ database_->searchForRecords(zone_id_, name.toText());
+ bool records_found = false;
+ isc::dns::RRsetPtr result_rrset;
+
+ std::string columns[DatabaseAccessor::COLUMN_COUNT];
+ while (database_->getNextRecord(columns, DatabaseAccessor::COLUMN_COUNT)) {
+ if (!records_found) {
+ records_found = true;
+ }
+
+ try {
+ const isc::dns::RRType cur_type(columns[DatabaseAccessor::
+ TYPE_COLUMN]);
+ const isc::dns::RRTTL cur_ttl(columns[DatabaseAccessor::
+ TTL_COLUMN]);
+            // The sigtype column was an optimization for finding the
+ // relevant RRSIG RRs for a lookup. Currently this column is
+ // not used in this revised datasource implementation. We
+ // should either start using it again, or remove it from use
+ // completely (i.e. also remove it from the schema and the
+ // backend implementation).
+ // Note that because we don't use it now, we also won't notice
+ // it if the value is wrong (i.e. if the sigtype column
+ // contains an rrtype that is different from the actual value
+ // of the 'type covered' field in the RRSIG Rdata).
+ //cur_sigtype(columns[SIGTYPE_COLUMN]);
+
+ // Check for delegations before checking for the right type.
+            // This is needed to properly delegate requests for the NS
+ // record itself.
+ //
+ // This happens with NS only, CNAME must be alone and DNAME
+ // is not checked in the exact queried domain.
+ if (want_ns && cur_type == isc::dns::RRType::NS()) {
+ if (result_rrset &&
+ result_rrset->getType() != isc::dns::RRType::NS()) {
+ isc_throw(DataSourceError, "NS found together with data"
+ " in non-apex domain " + name.toText());
+ }
+ addOrCreate(result_rrset, name, getClass(), cur_type, cur_ttl,
+ columns[DatabaseAccessor::RDATA_COLUMN],
+ *database_);
+ } else if (type != NULL && cur_type == *type) {
+ if (result_rrset &&
+ result_rrset->getType() == isc::dns::RRType::CNAME()) {
+ isc_throw(DataSourceError, "CNAME found but it is not "
+ "the only record for " + name.toText());
+ } else if (result_rrset && want_ns &&
+ result_rrset->getType() == isc::dns::RRType::NS()) {
+ isc_throw(DataSourceError, "NS found together with data"
+ " in non-apex domain " + name.toText());
+ }
+ addOrCreate(result_rrset, name, getClass(), cur_type, cur_ttl,
+ columns[DatabaseAccessor::RDATA_COLUMN],
+ *database_);
+ } else if (want_cname && cur_type == isc::dns::RRType::CNAME()) {
+ // There should be no other data, so result_rrset should
+ // be empty.
+ if (result_rrset) {
+ isc_throw(DataSourceError, "CNAME found but it is not "
+ "the only record for " + name.toText());
+ }
+ addOrCreate(result_rrset, name, getClass(), cur_type, cur_ttl,
+ columns[DatabaseAccessor::RDATA_COLUMN],
+ *database_);
+ } else if (want_dname && cur_type == isc::dns::RRType::DNAME()) {
+ // There should be max one RR of DNAME present
+ if (result_rrset &&
+ result_rrset->getType() == isc::dns::RRType::DNAME()) {
+ isc_throw(DataSourceError, "DNAME with multiple RRs in " +
+ name.toText());
+ }
+ addOrCreate(result_rrset, name, getClass(), cur_type, cur_ttl,
+ columns[DatabaseAccessor::RDATA_COLUMN],
+ *database_);
+ } else if (cur_type == isc::dns::RRType::RRSIG()) {
+ // If we get signatures before we get the actual data, we
+ // can't know which ones to keep and which to drop...
+ // So we keep a separate store of any signature that may be
+ // relevant and add them to the final RRset when we are
+ // done.
+ // A possible optimization here is to not store them for
+ // types we are certain we don't need
+ sig_store.addSig(isc::dns::rdata::createRdata(cur_type,
+ getClass(), columns[DatabaseAccessor::RDATA_COLUMN]));
+ }
+ } catch (const isc::dns::InvalidRRType& irt) {
+ isc_throw(DataSourceError, "Invalid RRType in database for " <<
+ name << ": " << columns[DatabaseAccessor::
+ TYPE_COLUMN]);
+ } catch (const isc::dns::InvalidRRTTL& irttl) {
+ isc_throw(DataSourceError, "Invalid TTL in database for " <<
+ name << ": " << columns[DatabaseAccessor::
+ TTL_COLUMN]);
+ } catch (const isc::dns::rdata::InvalidRdataText& ird) {
+ isc_throw(DataSourceError, "Invalid rdata in database for " <<
+ name << ": " << columns[DatabaseAccessor::
+ RDATA_COLUMN]);
+ }
+ }
+ if (result_rrset) {
+ sig_store.appendSignatures(result_rrset);
+ }
+ return (std::pair<bool, isc::dns::RRsetPtr>(records_found, result_rrset));
+}
+
+
+ZoneFinder::FindResult
+DatabaseClient::Finder::find(const isc::dns::Name& name,
+ const isc::dns::RRType& type,
+ isc::dns::RRsetList*,
+ const FindOptions options)
+{
+ // This variable is used to determine the difference between
+ // NXDOMAIN and NXRRSET
+ bool records_found = false;
+ bool glue_ok(options & FIND_GLUE_OK);
+ isc::dns::RRsetPtr result_rrset;
+ ZoneFinder::Result result_status = SUCCESS;
+ std::pair<bool, isc::dns::RRsetPtr> found;
+ logger.debug(DBG_TRACE_DETAILED, DATASRC_DATABASE_FIND_RECORDS)
+ .arg(database_->getDBName()).arg(name).arg(type);
+
+ try {
+ // First, do we have any kind of delegation (NS/DNAME) here?
+ Name origin(getOrigin());
+ size_t origin_label_count(origin.getLabelCount());
+ size_t current_label_count(name.getLabelCount());
+ // This is how many labels we remove to get origin
+ size_t remove_labels(current_label_count - origin_label_count);
+
+        // Now go through all superdomains from origin down
+ for (int i(remove_labels); i > 0; --i) {
+ Name superdomain(name.split(i));
+ // Look if there's NS or DNAME (but ignore the NS in origin)
+ found = getRRset(superdomain, NULL, false, true,
+ i != remove_labels && !glue_ok);
+ if (found.second) {
+ // We found something redirecting somewhere else
+ // (it can be only NS or DNAME here)
+ result_rrset = found.second;
+ if (result_rrset->getType() == isc::dns::RRType::NS()) {
+ LOG_DEBUG(logger, DBG_TRACE_DETAILED,
+ DATASRC_DATABASE_FOUND_DELEGATION).
+ arg(database_->getDBName()).arg(superdomain);
+ result_status = DELEGATION;
+ } else {
+ LOG_DEBUG(logger, DBG_TRACE_DETAILED,
+ DATASRC_DATABASE_FOUND_DNAME).
+ arg(database_->getDBName()).arg(superdomain);
+ result_status = DNAME;
+ }
+ // Don't search more
+ break;
+ }
+ }
+
+ if (!result_rrset) { // Only if we didn't find a redirect already
+ // Try getting the final result and extract it
+ // It is special if there's a CNAME or NS, DNAME is ignored here
+ // And we don't consider the NS in origin
+ found = getRRset(name, &type, true, false,
+ name != origin && !glue_ok);
+ records_found = found.first;
+ result_rrset = found.second;
+ if (result_rrset && name != origin && !glue_ok &&
+ result_rrset->getType() == isc::dns::RRType::NS()) {
+ LOG_DEBUG(logger, DBG_TRACE_DETAILED,
+ DATASRC_DATABASE_FOUND_DELEGATION_EXACT).
+ arg(database_->getDBName()).arg(name);
+ result_status = DELEGATION;
+ } else if (result_rrset && type != isc::dns::RRType::CNAME() &&
+ result_rrset->getType() == isc::dns::RRType::CNAME()) {
+ result_status = CNAME;
+ }
+ }
+ } catch (const DataSourceError& dse) {
+ logger.error(DATASRC_DATABASE_FIND_ERROR)
+ .arg(database_->getDBName()).arg(dse.what());
+ // call cleanup and rethrow
+ database_->resetSearch();
+ throw;
+ } catch (const isc::Exception& isce) {
+ logger.error(DATASRC_DATABASE_FIND_UNCAUGHT_ISC_ERROR)
+ .arg(database_->getDBName()).arg(isce.what());
+ // cleanup, change it to a DataSourceError and rethrow
+ database_->resetSearch();
+ isc_throw(DataSourceError, isce.what());
+ } catch (const std::exception& ex) {
+ logger.error(DATASRC_DATABASE_FIND_UNCAUGHT_ERROR)
+ .arg(database_->getDBName()).arg(ex.what());
+ database_->resetSearch();
+ throw;
+ }
+
+ if (!result_rrset) {
+ if (records_found) {
+ logger.debug(DBG_TRACE_DETAILED,
+ DATASRC_DATABASE_FOUND_NXRRSET)
+ .arg(database_->getDBName()).arg(name)
+ .arg(getClass()).arg(type);
+ result_status = NXRRSET;
+ } else {
+ logger.debug(DBG_TRACE_DETAILED,
+ DATASRC_DATABASE_FOUND_NXDOMAIN)
+ .arg(database_->getDBName()).arg(name)
+ .arg(getClass()).arg(type);
+ result_status = NXDOMAIN;
+ }
+ } else {
+ logger.debug(DBG_TRACE_DETAILED,
+ DATASRC_DATABASE_FOUND_RRSET)
+ .arg(database_->getDBName()).arg(*result_rrset);
+ }
+ return (FindResult(result_status, result_rrset));
+}
+
+Name
+DatabaseClient::Finder::getOrigin() const {
+ return (origin_);
+}
+
+isc::dns::RRClass
+DatabaseClient::Finder::getClass() const {
+ // TODO Implement
+ return isc::dns::RRClass::IN();
+}
+
+}
+}
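
A minimal usage sketch (not part of the patch) showing how the pieces above are meant to fit together: an SQLite3Database accessor (added elsewhere in this patch, and assumed here to implement DatabaseAccessor as its method signatures suggest) is handed to a DatabaseClient, findZone() selects the enclosing zone, and the returned finder performs the lookup. The member names of the two FindResult structs (code, zone_finder, rrset) and the header paths are assumptions based on the existing client.h and zone.h interfaces.

    // Usage sketch only; FindResult member names are assumed, not taken from
    // this patch.
    #include <datasrc/database.h>
    #include <datasrc/sqlite3_accessor.h>

    #include <dns/name.h>
    #include <dns/rrclass.h>
    #include <dns/rrtype.h>

    #include <boost/shared_ptr.hpp>

    #include <iostream>

    using namespace isc::dns;
    using namespace isc::datasrc;

    int main() {
        // The accessor only knows how to talk to SQLite; the DNS logic
        // (superdomain walking, CNAME/NS/DNAME handling) lives in
        // DatabaseClient.
        boost::shared_ptr<DatabaseAccessor> accessor(
            new SQLite3Database("example.org.sqlite3", RRClass::IN()));
        DatabaseClient client(accessor);

        // findZone() tries the exact name first, then each superdomain.
        const DataSourceClient::FindResult zone(
            client.findZone(Name("www.example.org")));
        if (zone.code == result::SUCCESS || zone.code == result::PARTIALMATCH) {
            // The finder issues the per-name queries, assembles the RRset and
            // attaches any relevant RRSIGs it encountered along the way.
            const ZoneFinder::FindResult res(
                zone.zone_finder->find(Name("www.example.org"), RRType::A()));
            if (res.code == ZoneFinder::SUCCESS && res.rrset) {
                std::cout << res.rrset->toText();
            }
        }
        return (0);
    }
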
diff --git a/src/lib/datasrc/database.h b/src/lib/datasrc/database.h
new file mode 100644
index 0000000..95782ef
--- /dev/null
+++ b/src/lib/datasrc/database.h
@@ -0,0 +1,367 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __DATABASE_DATASRC_H
+#define __DATABASE_DATASRC_H
+
+#include <datasrc/client.h>
+
+#include <dns/name.h>
+
+namespace isc {
+namespace datasrc {
+
+/**
+ * \brief Abstraction of a low-level database with DNS data
+ *
+ * This class defines the interface to databases. Each supported database
+ * will provide methods for accessing the data stored there in a generic
+ * manner. The methods are meant to be low-level, without much or any
+ * knowledge about DNS, so that they can be translated directly to queries.
+ *
+ * On the other hand, how the communication with the database is done, and
+ * in what schema (in the case of a relational/SQL database), is up to the
+ * concrete classes.
+ *
+ * This class is non-copyable, as copying connections to a database makes
+ * little sense and will not be needed.
+ *
+ * \todo Is it true this does not need to be copied? For example the zone
+ * iterator might need its own copy. But a virtual clone() method might
+ * be better for that than a copy constructor.
+ *
+ * \note The same application may create multiple connections to the same
+ * database, having multiple instances of this class. If the database
+ * allows having multiple open queries on one connection, the connection
+ * class may share it.
+ */
+class DatabaseAccessor : boost::noncopyable {
+public:
+ /**
+ * \brief Destructor
+ *
+     * It is empty, but needs to be virtual, since we will use the derived
+     * classes in a polymorphic way.
+ */
+ virtual ~DatabaseAccessor() { }
+ /**
+ * \brief Retrieve a zone identifier
+ *
+     * This method looks up a zone for the given name in the database. It
+     * should match only the exact zone name (i.e. the name is equal to the
+     * zone's apex), as the DatabaseClient will loop through the labels
+     * itself and find the most suitable zone.
+     *
+     * It is not specified if and what an implementation of this method may
+     * throw, so calling code should expect anything.
+     *
+     * \param name The name of the zone's apex to be looked up.
+     * \return The first part of the result indicates if a matching zone
+     *     was found. In case it was, the second part is the internal zone
+     *     ID. This ID will be passed to methods that find data in the zone.
+     *     The implementation is not required to keep track of the IDs and
+     *     may return whatever value it likes; the ID is only passed back to
+     *     the database as an opaque handle.
+ */
+ virtual std::pair<bool, int> getZone(const isc::dns::Name& name) const = 0;
+
+ /**
+ * \brief Starts a new search for records of the given name in the given zone
+ *
+ * The data searched by this call can be retrieved with subsequent calls to
+ * getNextRecord().
+ *
+ * \exception DataSourceError if there is a problem connecting to the
+ * backend database
+ *
+ * \param zone_id The zone to search in, as returned by getZone()
+ * \param name The name of the records to find
+ */
+ virtual void searchForRecords(int zone_id, const std::string& name) = 0;
+
+ /**
+ * \brief Retrieves the next record from the search started with searchForRecords()
+ *
+     * Returns a boolean specifying whether or not there was more data to
+     * read. In the case of a database error, a DataSourceError is thrown.
+     *
+     * The columns argument is an array of std::strings consisting of
+     * DatabaseAccessor::COLUMN_COUNT elements, the elements of which
+     * are defined in DatabaseAccessor::RecordColumns, in their basic
+     * string representation.
+     *
+     * If you are implementing a derived database accessor class, you
+     * should have this method check the column_count value, and fill the
+     * array with strings conforming to their description in RecordColumns.
+     *
+     * \exception DataSourceError if there was an error reading from the
+     *            database
+     *
+     * \param columns The elements of this array will be filled with the data
+     *                for one record as defined by RecordColumns.
+     *                If there was no data, the array is untouched.
+ * \return true if there was a next record, false if there was not
+ */
+ virtual bool getNextRecord(std::string columns[], size_t column_count) = 0;
+
+ /**
+ * \brief Resets the current search initiated with searchForRecords()
+ *
+     * This method will be called when the caller of searchForRecords() and
+     * getNextRecord() finds bad data, and aborts the current search.
+     * It should clean up whatever handlers searchForRecords() created, and
+     * any other state modified or needed by getNextRecord().
+ *
+ * Of course, the implementation of getNextRecord may also use it when
+ * it is done with a search. If it does, the implementation of this
+ * method should make sure it can handle being called multiple times.
+ *
+ * The implementation for this method should make sure it never throws.
+ */
+ virtual void resetSearch() = 0;
+
+ /**
+ * Definitions of the fields as they are required to be filled in
+ * by getNextRecord()
+ *
+ * When implementing getNextRecord(), the columns array should
+ * be filled with the values as described in this enumeration,
+ * in this order, i.e. TYPE_COLUMN should be the first element
+ * (index 0) of the array, TTL_COLUMN should be the second element
+ * (index 1), etc.
+ */
+ enum RecordColumns {
+ TYPE_COLUMN = 0, ///< The RRType of the record (A/NS/TXT etc.)
+        TTL_COLUMN = 1,     ///< The TTL of the record (a number)
+ SIGTYPE_COLUMN = 2, ///< For RRSIG records, this contains the RRTYPE
+ ///< the RRSIG covers. In the current implementation,
+ ///< this field is ignored.
+ RDATA_COLUMN = 3 ///< Full text representation of the record's RDATA
+ };
+
+ /// The number of fields the columns array passed to getNextRecord should have
+ static const size_t COLUMN_COUNT = 4;
+
+ /**
+     * \brief Returns a string identifying this database backend
+ *
+ * The returned string is mainly intended to be used for
+ * debugging/logging purposes.
+ *
+ * Any implementation is free to choose the exact string content,
+ * but it is advisable to make it a name that is distinguishable
+ * from the others.
+ *
+ * \return the name of the database
+ */
+ virtual const std::string& getDBName() const = 0;
+};
+
+/**
+ * \brief Concrete data source client oriented at database backends.
+ *
+ * This class (together with corresponding versions of ZoneFinder,
+ * ZoneIterator, etc.) translates high-level data source queries to
+ * low-level calls on DatabaseAccessor. It issues multiple queries
+ * if necessary and validates data from the database, allowing the
+ * DatabaseAccessor to remain a simple translation layer to SQL or other
+ * database queries.
+ *
+ * While it is possible to subclass it for a specific database in case of
+ * special needs, that is not expected to be necessary. This should work
+ * as it is with any DatabaseAccessor.
+ */
+class DatabaseClient : public DataSourceClient {
+public:
+ /**
+ * \brief Constructor
+ *
+ * It initializes the client with a database.
+ *
+     * \exception isc::InvalidParameter if database is NULL. It might throw
+     * a standard allocation exception as well, but doesn't throw anything
+     * else.
+     *
+     * \param database The database to use to get data. As the parameter
+     *     suggests, the client takes ownership of the database and will
+     *     release it when the client itself is destroyed.
+ */
+ DatabaseClient(boost::shared_ptr<DatabaseAccessor> database);
+ /**
+ * \brief Corresponding ZoneFinder implementation
+ *
+ * The zone finder implementation for database data sources. Similarly
+ * to the DatabaseClient, it translates the queries to methods of the
+ * database.
+ *
+     * Applications should not come into direct contact with this class
+     * (they should handle it through a generic ZoneFinder pointer), so
+     * it could be completely hidden in the .cc file. But it is provided
+     * here to allow testing and for rare cases when a database needs
+     * slightly different handling, so it can be subclassed.
+     *
+     * Methods directly correspond to the ones in ZoneFinder.
+ */
+ class Finder : public ZoneFinder {
+ public:
+ /**
+ * \brief Constructor
+ *
+ * \param database The database (shared with DatabaseClient) to
+ * be used for queries (the one asked for ID before).
+ * \param zone_id The zone ID which was returned from
+ * DatabaseAccessor::getZone and which will be passed to further
+ * calls to the database.
+         * \param origin The name of the origin of this zone. It could be
+         *     queried from the database, but as the DatabaseClient has just
+         *     searched for the zone by name, it already has it.
+ */
+ Finder(boost::shared_ptr<DatabaseAccessor> database, int zone_id,
+ const isc::dns::Name& origin);
+ // The following three methods are just implementations of inherited
+ // ZoneFinder's pure virtual methods.
+ virtual isc::dns::Name getOrigin() const;
+ virtual isc::dns::RRClass getClass() const;
+
+ /**
+ * \brief Find an RRset in the datasource
+ *
+ * Searches the datasource for an RRset of the given name and
+ * type. If there is a CNAME at the given name, the CNAME rrset
+ * is returned.
+         * (This implementation is not complete; it currently only handles
+         * full matches, CNAMEs, and the signatures for matches and
+         * CNAMEs.)
+ * \note target was used in the original design to handle ANY
+ * queries. This is not implemented yet, and may use
+ * target again for that, but it might also use something
+ * different. It is left in for compatibility at the moment.
+ * \note options are ignored at this moment
+ *
+         * \note Maybe counter-intuitively, this method is not a const member
+ * function. This is intentional; some of the underlying implementations
+ * are expected to use a database backend, and would internally contain
+ * some abstraction of "database connection". In the most strict sense
+ * any (even read only) operation might change the internal state of
+ * such a connection, and in that sense the operation cannot be considered
+ * "const". In order to avoid giving a false sense of safety to the
+ * caller, we indicate a call to this method may have a surprising
+ * side effect. That said, this view may be too strict and it may
+ * make sense to say the internal database connection doesn't affect
+ * external behavior in terms of the interface of this method. As
+ * we gain more experiences with various kinds of backends we may
+ * revisit the constness.
+ *
+         * \exception DataSourceError when there is a problem reading
+         *                            the data from the database backend.
+ * This can be a connection, code, or
+ * data (parse) error.
+ *
+ * \param name The name to find
+ * \param type The RRType to find
+ * \param target Unused at this moment
+ * \param options Options about how to search.
+ * See ZoneFinder::FindOptions.
+ */
+ virtual FindResult find(const isc::dns::Name& name,
+ const isc::dns::RRType& type,
+ isc::dns::RRsetList* target = NULL,
+ const FindOptions options = FIND_DEFAULT);
+
+ /**
+ * \brief The zone ID
+ *
+ * This function provides the stored zone ID as passed to the
+ * constructor. This is meant for testing purposes and normal
+ * applications shouldn't need it.
+ */
+ int zone_id() const { return (zone_id_); }
+ /**
+ * \brief The database.
+ *
+ * This function provides the database stored inside as
+ * passed to the constructor. This is meant for testing purposes and
+ * normal applications shouldn't need it.
+ */
+ const DatabaseAccessor& database() const {
+ return (*database_);
+ }
+ private:
+ boost::shared_ptr<DatabaseAccessor> database_;
+ const int zone_id_;
+ const isc::dns::Name origin_;
+ /**
+         * \brief Searches the database for an RRset
+         *
+         * This method scans the RRs of a single domain specified by name
+         * and finds an RRset with the given type or any of the redirection
+         * RRsets that are requested.
+ *
+ * This function is used internally by find(), because this part is
+ * called multiple times with slightly different parameters.
+ *
+ * \param name Which domain name should be scanned.
+ * \param type The RRType which is requested. This can be NULL, in
+ * which case the method will look for the redirections only.
+ * \param want_cname If this is true, CNAME redirection may be returned
+ * instead of the RRset with given type. If there's CNAME and
+ * something else or the CNAME has multiple RRs, it throws
+ * DataSourceError.
+ * \param want_dname If this is true, DNAME redirection may be returned
+ * instead. This is with type = NULL only and is not checked in
+ * other circumstances. If the DNAME has multiple RRs, it throws
+ * DataSourceError.
+ * \param want_ns This allows redirection by NS to be returned. If
+ * any other data is met as well, DataSourceError is thrown.
+ * \note It may happen that some of the above error conditions are not
+ * detected in some circumstances. The goal here is not to validate
+ * the domain in DB, but to avoid bad behaviour resulting from
+ * broken data.
+ * \return First part of the result tells if the domain contains any
+ * RRs. This can be used to decide between NXDOMAIN and NXRRSET.
+ * The second part is the RRset found (if any) with any relevant
+ * signatures attached to it.
+ * \todo This interface doesn't look very elegant. Any better idea
+ * would be nice.
+ */
+ std::pair<bool, isc::dns::RRsetPtr> getRRset(const isc::dns::Name&
+ name,
+ const isc::dns::RRType*
+ type,
+ bool want_cname,
+ bool want_dname,
+ bool want_ns);
+ };
+ /**
+ * \brief Find a zone in the database
+ *
+     * This queries the database's getZone() to find the best matching zone.
+ * It will propagate whatever exceptions are thrown from that method
+ * (which is not restricted in any way).
+ *
+ * \param name Name of the zone or data contained there.
+ * \return FindResult containing the code and an instance of Finder, if
+     *     anything is found. However, the application should not rely on
+     *     the ZoneFinder being an instance of Finder (a possible subclass
+     *     of this class may return something else, and it may change in
+     *     future versions); it should use it as a ZoneFinder only.
+ */
+ virtual FindResult findZone(const isc::dns::Name& name) const;
+
+private:
+ /// \brief Our database.
+ const boost::shared_ptr<DatabaseAccessor> database_;
+};
+
+}
+}
+
+#endif
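
To illustrate the contract the interface above imposes on a backend, here is a minimal sketch (not part of the patch) of a DatabaseAccessor implementation that serves a single hard-coded record. The class name, the zone ID and the record contents are made up for illustration; a real backend would run its queries in searchForRecords() and getNextRecord().

    // Sketch of a trivial accessor: one A record for www.example.org.
    #include <datasrc/database.h>

    #include <dns/name.h>

    #include <string>
    #include <utility>

    using namespace isc::datasrc;

    class StaticAccessor : public DatabaseAccessor {
    public:
        StaticAccessor() : name_("static"), served_(true) {}

        // Only "example.org" exists; 42 is its opaque zone ID.
        virtual std::pair<bool, int> getZone(const isc::dns::Name& name) const {
            return (std::pair<bool, int>(
                name == isc::dns::Name("example.org"), 42));
        }

        // DatabaseClient passes name.toText(), i.e. with a trailing dot.
        virtual void searchForRecords(int, const std::string& name) {
            served_ = (name != "www.example.org.");
        }

        // Fill the columns in the documented order: TYPE, TTL, SIGTYPE, RDATA.
        virtual bool getNextRecord(std::string columns[], size_t column_count) {
            if (served_ || column_count < COLUMN_COUNT) {
                return (false);
            }
            columns[TYPE_COLUMN] = "A";
            columns[TTL_COLUMN] = "3600";
            columns[SIGTYPE_COLUMN] = "";
            columns[RDATA_COLUMN] = "192.0.2.1";
            served_ = true;
            return (true);
        }

        // Nothing to clean up here, but this must never throw.
        virtual void resetSearch() {
            served_ = true;
        }

        virtual const std::string& getDBName() const {
            return (name_);
        }

    private:
        const std::string name_;
        bool served_;
    };
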
diff --git a/src/lib/datasrc/datasrc_messages.mes b/src/lib/datasrc/datasrc_messages.mes
index 3dc69e0..190adbe 100644
--- a/src/lib/datasrc/datasrc_messages.mes
+++ b/src/lib/datasrc/datasrc_messages.mes
@@ -63,6 +63,60 @@ The maximum allowed number of items of the hotspot cache is set to the given
number. If there are too many, some of them will be dropped. The size of 0
means no limit.
+% DATASRC_DATABASE_FIND_ERROR error retrieving data from datasource %1: %2
+This was an internal error while reading data from a datasource. It can mean
+either that the specific data source implementation is not behaving correctly,
+or that the data it provides is invalid. The current search is aborted.
+The error message contains specific information about the error.
+
+% DATASRC_DATABASE_FIND_RECORDS looking in datasource %1 for record %2/%3
+Debug information. The database data source is looking up records with the given
+name and type in the database.
+
+% DATASRC_DATABASE_FIND_TTL_MISMATCH TTL values differ in %1 for elements of %2/%3/%4, setting to %5
+The datasource backend provided resource records for the given RRset with
+different TTL values. The TTL of the RRset is set to the lowest value, which
+is printed in the log message.
+
+% DATASRC_DATABASE_FIND_UNCAUGHT_ERROR uncaught general error retrieving data from datasource %1: %2
+There was an uncaught general exception while reading data from a datasource.
+This most likely points to a logic error in the code, and can be considered a
+bug. The current search is aborted. Specific information about the exception is
+printed in this error message.
+
+% DATASRC_DATABASE_FIND_UNCAUGHT_ISC_ERROR uncaught error retrieving data from datasource %1: %2
+There was an uncaught ISC exception while reading data from a datasource. This
+most likely points to a logic error in the code, and can be considered a bug.
+The current search is aborted. Specific information about the exception is
+printed in this error message.
+
+% DATASRC_DATABASE_FOUND_DELEGATION Found delegation at %2 in %1
+When searching for a domain, the program met a delegation to a different zone
+at the given domain name. It will return that one instead.
+
+% DATASRC_DATABASE_FOUND_DELEGATION_EXACT Found delegation at %2 (exact match) in %1
+The program found the domain requested, but it is a delegation point to a
+different zone, therefore it is not authoritative for this domain name.
+It will return the NS record instead.
+
+% DATASRC_DATABASE_FOUND_DNAME Found DNAME at %2 in %1
+When searching for a domain, the program met a DNAME redirection to a different
+place in the domain space at the given domain name. It will return that one
+instead.
+
+% DATASRC_DATABASE_FOUND_NXDOMAIN search in datasource %1 resulted in NXDOMAIN for %2/%3/%4
+The data returned by the database backend did not contain any data for the given
+domain name, class and type.
+
+% DATASRC_DATABASE_FOUND_NXRRSET search in datasource %1 resulted in NXRRSET for %2/%3/%4
+The data returned by the database backend contained data for the given domain
+name and class, but not for the given type.
+
+% DATASRC_DATABASE_FOUND_RRSET search in datasource %1 resulted in RRset %2
+The data returned by the database backend contained data for the given domain
+name, and it either matches the type or has a relevant type. The RRset that is
+returned is printed.
+
% DATASRC_DO_QUERY handling query for '%1/%2'
A debug message indicating that a query for the given name and RR type is being
processed.
@@ -400,12 +454,22 @@ enough information for it. The code is 1 for error, 2 for not implemented.
% DATASRC_SQLITE_CLOSE closing SQLite database
Debug information. The SQLite data source is closing the database file.
+
+% DATASRC_SQLITE_CONNOPEN Opening sqlite database file '%1'
+The database file is being opened so it can start providing data.
+
+% DATASRC_SQLITE_CONNCLOSE Closing sqlite database
+The database file is no longer needed and is being closed.
+
% DATASRC_SQLITE_CREATE SQLite data source created
Debug information. An instance of SQLite data source is being created.
% DATASRC_SQLITE_DESTROY SQLite data source destroyed
Debug information. An instance of SQLite data source is being destroyed.
+% DATASRC_SQLITE_DROPCONN SQLite3Database is being deinitialized
+The object around a database connection is being destroyed.
+
% DATASRC_SQLITE_ENCLOSURE looking for zone containing '%1'
Debug information. The SQLite data source is trying to identify which zone
should hold this domain.
@@ -458,6 +522,9 @@ source.
The SQLite data source was asked to provide a NSEC3 record for given zone.
But it doesn't contain that zone.
+% DATASRC_SQLITE_NEWCONN SQLite3Database is being initialized
+A wrapper object to hold database connection is being initialized.
+
% DATASRC_SQLITE_OPEN opening SQLite database '%1'
Debug information. The SQLite data source is loading an SQLite database in
the provided file.
@@ -496,4 +563,3 @@ data source.
% DATASRC_UNEXPECTED_QUERY_STATE unexpected query state
This indicates a programming error. An internal task of unknown type was
generated.
-
diff --git a/src/lib/datasrc/memory_datasrc.cc b/src/lib/datasrc/memory_datasrc.cc
index 3d24ce0..d06cd9b 100644
--- a/src/lib/datasrc/memory_datasrc.cc
+++ b/src/lib/datasrc/memory_datasrc.cc
@@ -606,19 +606,19 @@ InMemoryZoneFinder::~InMemoryZoneFinder() {
delete impl_;
}
-const Name&
+Name
InMemoryZoneFinder::getOrigin() const {
return (impl_->origin_);
}
-const RRClass&
+RRClass
InMemoryZoneFinder::getClass() const {
return (impl_->zone_class_);
}
ZoneFinder::FindResult
InMemoryZoneFinder::find(const Name& name, const RRType& type,
- RRsetList* target, const FindOptions options) const
+ RRsetList* target, const FindOptions options)
{
return (impl_->find(name, type, target, options));
}
diff --git a/src/lib/datasrc/memory_datasrc.h b/src/lib/datasrc/memory_datasrc.h
index 9bed960..0234a91 100644
--- a/src/lib/datasrc/memory_datasrc.h
+++ b/src/lib/datasrc/memory_datasrc.h
@@ -58,10 +58,10 @@ public:
//@}
/// \brief Returns the origin of the zone.
- virtual const isc::dns::Name& getOrigin() const;
+ virtual isc::dns::Name getOrigin() const;
/// \brief Returns the class of the zone.
- virtual const isc::dns::RRClass& getClass() const;
+ virtual isc::dns::RRClass getClass() const;
/// \brief Looks up an RRset in the zone.
///
@@ -73,7 +73,7 @@ public:
virtual FindResult find(const isc::dns::Name& name,
const isc::dns::RRType& type,
isc::dns::RRsetList* target = NULL,
- const FindOptions options = FIND_DEFAULT) const;
+ const FindOptions options = FIND_DEFAULT);
/// \brief Inserts an rrset into the zone.
///
diff --git a/src/lib/datasrc/sqlite3_accessor.cc b/src/lib/datasrc/sqlite3_accessor.cc
new file mode 100644
index 0000000..817d530
--- /dev/null
+++ b/src/lib/datasrc/sqlite3_accessor.cc
@@ -0,0 +1,412 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <sqlite3.h>
+
+#include <datasrc/sqlite3_accessor.h>
+#include <datasrc/logger.h>
+#include <datasrc/data_source.h>
+#include <util/filename.h>
+
+namespace isc {
+namespace datasrc {
+
+struct SQLite3Parameters {
+ SQLite3Parameters() :
+ db_(NULL), version_(-1),
+ q_zone_(NULL), q_any_(NULL)
+ /*q_record_(NULL), q_addrs_(NULL), q_referral_(NULL),
+ q_count_(NULL), q_previous_(NULL), q_nsec3_(NULL),
+ q_prevnsec3_(NULL) */
+ {}
+ sqlite3* db_;
+ int version_;
+ sqlite3_stmt* q_zone_;
+ sqlite3_stmt* q_any_;
+ /*
+ TODO: Yet unneeded statements
+ sqlite3_stmt* q_record_;
+ sqlite3_stmt* q_addrs_;
+ sqlite3_stmt* q_referral_;
+ sqlite3_stmt* q_count_;
+ sqlite3_stmt* q_previous_;
+ sqlite3_stmt* q_nsec3_;
+ sqlite3_stmt* q_prevnsec3_;
+ */
+};
+
+SQLite3Database::SQLite3Database(const std::string& filename,
+ const isc::dns::RRClass& rrclass) :
+ dbparameters_(new SQLite3Parameters),
+ class_(rrclass.toText()),
+ database_name_("sqlite3_" +
+ isc::util::Filename(filename).nameAndExtension())
+{
+ LOG_DEBUG(logger, DBG_TRACE_BASIC, DATASRC_SQLITE_NEWCONN);
+
+ open(filename);
+}
+
+namespace {
+
+// This is a helper class to initialize a Sqlite3 DB safely. An object of
+// this class encapsulates all temporary resources that are necessary for
+// the initialization, and release them in the destructor. Once everything
+// is properly initialized, the move() method moves the allocated resources
+// to the main object in an exception free manner. This way, the main code
+// for the initialization can be exception safe, and can provide the strong
+// exception guarantee.
+class Initializer {
+public:
+ ~Initializer() {
+ if (params_.q_zone_ != NULL) {
+ sqlite3_finalize(params_.q_zone_);
+ }
+ if (params_.q_any_ != NULL) {
+ sqlite3_finalize(params_.q_any_);
+ }
+ /*
+ if (params_.q_record_ != NULL) {
+ sqlite3_finalize(params_.q_record_);
+ }
+ if (params_.q_addrs_ != NULL) {
+ sqlite3_finalize(params_.q_addrs_);
+ }
+ if (params_.q_referral_ != NULL) {
+ sqlite3_finalize(params_.q_referral_);
+ }
+ if (params_.q_count_ != NULL) {
+ sqlite3_finalize(params_.q_count_);
+ }
+ if (params_.q_previous_ != NULL) {
+ sqlite3_finalize(params_.q_previous_);
+ }
+ if (params_.q_nsec3_ != NULL) {
+ sqlite3_finalize(params_.q_nsec3_);
+ }
+ if (params_.q_prevnsec3_ != NULL) {
+ sqlite3_finalize(params_.q_prevnsec3_);
+ }
+ */
+ if (params_.db_ != NULL) {
+ sqlite3_close(params_.db_);
+ }
+ }
+ void move(SQLite3Parameters* dst) {
+ *dst = params_;
+ params_ = SQLite3Parameters(); // clear everything
+ }
+ SQLite3Parameters params_;
+};
+
+const char* const SCHEMA_LIST[] = {
+ "CREATE TABLE schema_version (version INTEGER NOT NULL)",
+ "INSERT INTO schema_version VALUES (1)",
+ "CREATE TABLE zones (id INTEGER PRIMARY KEY, "
+ "name STRING NOT NULL COLLATE NOCASE, "
+ "rdclass STRING NOT NULL COLLATE NOCASE DEFAULT 'IN', "
+ "dnssec BOOLEAN NOT NULL DEFAULT 0)",
+ "CREATE INDEX zones_byname ON zones (name)",
+ "CREATE TABLE records (id INTEGER PRIMARY KEY, "
+ "zone_id INTEGER NOT NULL, name STRING NOT NULL COLLATE NOCASE, "
+ "rname STRING NOT NULL COLLATE NOCASE, ttl INTEGER NOT NULL, "
+ "rdtype STRING NOT NULL COLLATE NOCASE, sigtype STRING COLLATE NOCASE, "
+ "rdata STRING NOT NULL)",
+ "CREATE INDEX records_byname ON records (name)",
+ "CREATE INDEX records_byrname ON records (rname)",
+ "CREATE TABLE nsec3 (id INTEGER PRIMARY KEY, zone_id INTEGER NOT NULL, "
+ "hash STRING NOT NULL COLLATE NOCASE, "
+ "owner STRING NOT NULL COLLATE NOCASE, "
+ "ttl INTEGER NOT NULL, rdtype STRING NOT NULL COLLATE NOCASE, "
+ "rdata STRING NOT NULL)",
+ "CREATE INDEX nsec3_byhash ON nsec3 (hash)",
+ NULL
+};
+
+const char* const q_zone_str = "SELECT id FROM zones WHERE name=?1 AND rdclass = ?2";
+
+const char* const q_any_str = "SELECT rdtype, ttl, sigtype, rdata "
+ "FROM records WHERE zone_id=?1 AND name=?2";
+
+/* TODO: Prune the statements, not everything will be needed maybe?
+const char* const q_record_str = "SELECT rdtype, ttl, sigtype, rdata "
+ "FROM records WHERE zone_id=?1 AND name=?2 AND "
+ "((rdtype=?3 OR sigtype=?3) OR "
+ "(rdtype='CNAME' OR sigtype='CNAME') OR "
+ "(rdtype='NS' OR sigtype='NS'))";
+
+const char* const q_addrs_str = "SELECT rdtype, ttl, sigtype, rdata "
+ "FROM records WHERE zone_id=?1 AND name=?2 AND "
+ "(rdtype='A' OR sigtype='A' OR rdtype='AAAA' OR sigtype='AAAA')";
+
+const char* const q_referral_str = "SELECT rdtype, ttl, sigtype, rdata FROM "
+ "records WHERE zone_id=?1 AND name=?2 AND"
+ "(rdtype='NS' OR sigtype='NS' OR rdtype='DS' OR sigtype='DS' OR "
+ "rdtype='DNAME' OR sigtype='DNAME')";
+
+const char* const q_count_str = "SELECT COUNT(*) FROM records "
+ "WHERE zone_id=?1 AND rname LIKE (?2 || '%');";
+
+const char* const q_previous_str = "SELECT name FROM records "
+ "WHERE zone_id=?1 AND rdtype = 'NSEC' AND "
+ "rname < $2 ORDER BY rname DESC LIMIT 1";
+
+const char* const q_nsec3_str = "SELECT rdtype, ttl, rdata FROM nsec3 "
+ "WHERE zone_id = ?1 AND hash = $2";
+
+const char* const q_prevnsec3_str = "SELECT hash FROM nsec3 "
+ "WHERE zone_id = ?1 AND hash <= $2 ORDER BY hash DESC LIMIT 1";
+ */
+
+sqlite3_stmt*
+prepare(sqlite3* const db, const char* const statement) {
+ sqlite3_stmt* prepared = NULL;
+ if (sqlite3_prepare_v2(db, statement, -1, &prepared, NULL) != SQLITE_OK) {
+ isc_throw(SQLite3Error, "Could not prepare SQLite statement: " <<
+ statement);
+ }
+ return (prepared);
+}
+
+void
+checkAndSetupSchema(Initializer* initializer) {
+ sqlite3* const db = initializer->params_.db_;
+
+ sqlite3_stmt* prepared = NULL;
+ if (sqlite3_prepare_v2(db, "SELECT version FROM schema_version", -1,
+ &prepared, NULL) == SQLITE_OK &&
+ sqlite3_step(prepared) == SQLITE_ROW) {
+ initializer->params_.version_ = sqlite3_column_int(prepared, 0);
+ sqlite3_finalize(prepared);
+ } else {
+ logger.info(DATASRC_SQLITE_SETUP);
+ if (prepared != NULL) {
+ sqlite3_finalize(prepared);
+ }
+ for (int i = 0; SCHEMA_LIST[i] != NULL; ++i) {
+ if (sqlite3_exec(db, SCHEMA_LIST[i], NULL, NULL, NULL) !=
+ SQLITE_OK) {
+ isc_throw(SQLite3Error,
+ "Failed to set up schema " << SCHEMA_LIST[i]);
+ }
+ }
+ }
+
+ initializer->params_.q_zone_ = prepare(db, q_zone_str);
+ initializer->params_.q_any_ = prepare(db, q_any_str);
+ /* TODO: Yet unneeded statements
+ initializer->params_.q_record_ = prepare(db, q_record_str);
+ initializer->params_.q_addrs_ = prepare(db, q_addrs_str);
+ initializer->params_.q_referral_ = prepare(db, q_referral_str);
+ initializer->params_.q_count_ = prepare(db, q_count_str);
+ initializer->params_.q_previous_ = prepare(db, q_previous_str);
+ initializer->params_.q_nsec3_ = prepare(db, q_nsec3_str);
+ initializer->params_.q_prevnsec3_ = prepare(db, q_prevnsec3_str);
+ */
+}
+
+}
+
+void
+SQLite3Database::open(const std::string& name) {
+ LOG_DEBUG(logger, DBG_TRACE_BASIC, DATASRC_SQLITE_CONNOPEN).arg(name);
+ if (dbparameters_->db_ != NULL) {
+ // There shouldn't be a way to trigger this anyway
+ isc_throw(DataSourceError, "Duplicate SQLite open with " << name);
+ }
+
+ Initializer initializer;
+
+ if (sqlite3_open(name.c_str(), &initializer.params_.db_) != 0) {
+ isc_throw(SQLite3Error, "Cannot open SQLite database file: " << name);
+ }
+
+ checkAndSetupSchema(&initializer);
+ initializer.move(dbparameters_);
+}
+
+SQLite3Database::~SQLite3Database() {
+ LOG_DEBUG(logger, DBG_TRACE_BASIC, DATASRC_SQLITE_DROPCONN);
+ if (dbparameters_->db_ != NULL) {
+ close();
+ }
+ delete dbparameters_;
+}
+
+void
+SQLite3Database::close(void) {
+ LOG_DEBUG(logger, DBG_TRACE_BASIC, DATASRC_SQLITE_CONNCLOSE);
+ if (dbparameters_->db_ == NULL) {
+ isc_throw(DataSourceError,
+ "SQLite data source is being closed before open");
+ }
+
+ // XXX: sqlite3_finalize() could fail. What should we do in that case?
+ sqlite3_finalize(dbparameters_->q_zone_);
+ dbparameters_->q_zone_ = NULL;
+
+ sqlite3_finalize(dbparameters_->q_any_);
+ dbparameters_->q_any_ = NULL;
+
+ /* TODO: Once we know whether these are needed, uncomment or drop them
+ sqlite3_finalize(dbparameters->q_record_);
+ dbparameters->q_record_ = NULL;
+
+ sqlite3_finalize(dbparameters->q_addrs_);
+ dbparameters->q_addrs_ = NULL;
+
+ sqlite3_finalize(dbparameters->q_referral_);
+ dbparameters->q_referral_ = NULL;
+
+ sqlite3_finalize(dbparameters->q_count_);
+ dbparameters->q_count_ = NULL;
+
+ sqlite3_finalize(dbparameters->q_previous_);
+ dbparameters->q_previous_ = NULL;
+
+ sqlite3_finalize(dbparameters->q_prevnsec3_);
+ dbparameters->q_prevnsec3_ = NULL;
+
+ sqlite3_finalize(dbparameters->q_nsec3_);
+ dbparameters->q_nsec3_ = NULL;
+ */
+
+ sqlite3_close(dbparameters_->db_);
+ dbparameters_->db_ = NULL;
+}
+
+std::pair<bool, int>
+SQLite3Database::getZone(const isc::dns::Name& name) const {
+ int rc;
+
+ // Take the already prepared statement (simple SELECT id FROM zones WHERE...)
+ // and bind the parameters to it
+ sqlite3_reset(dbparameters_->q_zone_);
+ rc = sqlite3_bind_text(dbparameters_->q_zone_, 1, name.toText().c_str(),
+ -1, SQLITE_TRANSIENT);
+ if (rc != SQLITE_OK) {
+ isc_throw(SQLite3Error, "Could not bind " << name <<
+ " to SQL statement (zone)");
+ }
+ rc = sqlite3_bind_text(dbparameters_->q_zone_, 2, class_.c_str(), -1,
+ SQLITE_STATIC);
+ if (rc != SQLITE_OK) {
+ isc_throw(SQLite3Error, "Could not bind " << class_ <<
+ " to SQL statement (zone)");
+ }
+
+ // Execute the query and see whether it found anything
+ rc = sqlite3_step(dbparameters_->q_zone_);
+ std::pair<bool, int> result;
+ if (rc == SQLITE_ROW) {
+ result = std::pair<bool, int>(true,
+ sqlite3_column_int(dbparameters_->
+ q_zone_, 0));
+ } else {
+ result = std::pair<bool, int>(false, 0);
+ }
+ // Free resources
+ sqlite3_reset(dbparameters_->q_zone_);
+
+ return (result);
+}
+
+void
+SQLite3Database::searchForRecords(int zone_id, const std::string& name) {
+ resetSearch();
+ if (sqlite3_bind_int(dbparameters_->q_any_, 1, zone_id) != SQLITE_OK) {
+ isc_throw(DataSourceError,
+ "Error in sqlite3_bind_int() for zone_id " <<
+ zone_id << ": " << sqlite3_errmsg(dbparameters_->db_));
+ }
+ // use transient since name is a ref and may disappear
+ if (sqlite3_bind_text(dbparameters_->q_any_, 2, name.c_str(), -1,
+ SQLITE_TRANSIENT) != SQLITE_OK) {
+ isc_throw(DataSourceError,
+ "Error in sqlite3_bind_text() for name " <<
+ name << ": " << sqlite3_errmsg(dbparameters_->db_));
+ }
+}
+
+namespace {
+// This helper function converts from the unsigned char* type (used by
+// sqlite3) to char* (wanted by std::string). Technically these types
+// might not be directly convertible.
+// In case sqlite3_column_text() returns NULL, we just make it an
+// empty string.
+// The SQLite3Parameters value is only used to check the error code if
+// ucp == NULL.
+const char*
+convertToPlainChar(const unsigned char* ucp,
+ SQLite3Parameters* dbparameters) {
+ if (ucp == NULL) {
+ // The field can really be NULL, in which case we return an
+ // empty string, or sqlite may have run out of memory, in
+ // which case we raise an error
+ if (dbparameters != NULL &&
+ sqlite3_errcode(dbparameters->db_) == SQLITE_NOMEM) {
+ isc_throw(DataSourceError,
+ "Sqlite3 backend encountered a memory allocation "
+ "error in sqlite3_column_text()");
+ } else {
+ return ("");
+ }
+ }
+ const void* p = ucp;
+ return (static_cast<const char*>(p));
+}
+}
+
+bool
+SQLite3Database::getNextRecord(std::string columns[], size_t column_count) {
+ if (column_count != COLUMN_COUNT) {
+ isc_throw(DataSourceError,
+ "Datasource backend caller did not pass a column array "
+ "of size " << COLUMN_COUNT << " to getNextRecord()");
+ }
+
+ sqlite3_stmt* current_stmt = dbparameters_->q_any_;
+ const int rc = sqlite3_step(current_stmt);
+
+ if (rc == SQLITE_ROW) {
+ for (int column = 0; column < column_count; ++column) {
+ try {
+ columns[column] = convertToPlainChar(sqlite3_column_text(
+ current_stmt, column),
+ dbparameters_);
+ } catch (const std::bad_alloc&) {
+ isc_throw(DataSourceError,
+ "bad_alloc in Sqlite3Connection::getNextRecord");
+ }
+ }
+ return (true);
+ } else if (rc == SQLITE_DONE) {
+ // reached the end of matching rows
+ resetSearch();
+ return (false);
+ }
+ isc_throw(DataSourceError, "Unexpected failure in sqlite3_step: " <<
+ sqlite3_errmsg(dbparameters_->db_));
+ // Compilers might not realize isc_throw always throws
+ return (false);
+}
+
+void
+SQLite3Database::resetSearch() {
+ sqlite3_reset(dbparameters_->q_any_);
+ sqlite3_clear_bindings(dbparameters_->q_any_);
+}
+
+}
+}
diff --git a/src/lib/datasrc/sqlite3_accessor.h b/src/lib/datasrc/sqlite3_accessor.h
new file mode 100644
index 0000000..4c2ec8b
--- /dev/null
+++ b/src/lib/datasrc/sqlite3_accessor.h
@@ -0,0 +1,160 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+
+#ifndef __DATASRC_SQLITE3_ACCESSOR_H
+#define __DATASRC_SQLITE3_ACCESSOR_H
+
+#include <datasrc/database.h>
+
+#include <exceptions/exceptions.h>
+
+#include <string>
+
+namespace isc {
+namespace dns {
+class RRClass;
+}
+
+namespace datasrc {
+
+/**
+ * \brief Low-level database error
+ *
+ * This exception is thrown when the SQLite library complains about something.
+ * It might mean corrupt database file, invalid request or that something is
+ * rotten in the library.
+ */
+class SQLite3Error : public Exception {
+public:
+ SQLite3Error(const char* file, size_t line, const char* what) :
+ isc::Exception(file, line, what) {}
+};
+
+struct SQLite3Parameters;
+
+/**
+ * \brief Concrete implementation of DatabaseAccessor for SQLite3 databases
+ *
+ * This opens one database file with our schema and serves data from there.
+ * According to the design, it doesn't interpret the data in any way, it just
+ * provides unified access to the DB.
+ */
+class SQLite3Database : public DatabaseAccessor {
+public:
+ /**
+ * \brief Constructor
+ *
+ * This opens the database and becomes ready to serve data from there.
+ *
+ * \exception SQLite3Error will be thrown if the given database file
+ * doesn't work (it is broken, doesn't exist and can't be created, etc).
+ *
+ * \param filename The database file to be used.
+ * \param rrclass Which class of data it should serve (while the database
+ * file can contain multiple classes of data, a single accessor instance
+ * can provide only one class).
+ */
+ SQLite3Database(const std::string& filename,
+ const isc::dns::RRClass& rrclass);
+ /**
+ * \brief Destructor
+ *
+ * Closes the database.
+ */
+ ~SQLite3Database();
+ /**
+ * \brief Look up a zone
+ *
+ * This implements the getZone from DatabaseAccessor and looks up a zone
+ * in the data. It looks for a zone with the exact given origin and class
+ * passed to the constructor.
+ *
+ * \exception SQLite3Error if something about the database is broken.
+ *
+ * \param name The name of zone to look up
+ * \return The pair contains if the lookup was successful in the first
+ * element and the zone id in the second if it was.
+ */
+ virtual std::pair<bool, int> getZone(const isc::dns::Name& name) const;
+
+ /**
+ * \brief Start a new search for the given name in the given zone.
+ *
+ * This implements the searchForRecords from DatabaseAccessor.
+ *
+ * \exception DataSourceError when sqlite3_bind_int() or
+ * sqlite3_bind_text() fails
+ *
+ * \param zone_id The zone to search in, as returned by getZone()
+ * \param name The name to find records for
+ */
+ virtual void searchForRecords(int zone_id, const std::string& name);
+
+ /**
+ * \brief Retrieve the next record from the search started with
+ * searchForRecords
+ *
+ * This implements the getNextRecord from DatabaseAccessor.
+ * See the documentation there for more information.
+ *
+ * If this method raises an exception, the contents of columns are undefined.
+ *
+ * \exception DataSourceError if there is an error returned by sqlite_step()
+ * When this exception is raised, the current
+ * search as initialized by searchForRecords() is
+ * NOT reset, and the caller is expected to take
+ * care of that.
+ * \param columns An array of at least column_count strings; the fields of
+ * the record are stored here as strings (in the order rdtype, ttl,
+ * sigtype, and rdata). If there was no data (i.e. if this call
+ * returns false), the array is untouched.
+ * \param column_count The number of elements in columns; it must equal
+ * COLUMN_COUNT, otherwise DataSourceError is thrown.
+ * \return true if there was a next record, false if there was not
+ */
+ virtual bool getNextRecord(std::string columns[], size_t column_count);
+
+ /**
+ * \brief Resets any state created by searchForRecords
+ *
+ * This implements the resetSearch from DatabaseAccessor.
+ * See the documentation there for more information.
+ *
+ * This function never throws.
+ */
+ virtual void resetSearch();
+
+ /// The SQLite3 implementation of this method returns a string starting
+ /// with a fixed prefix of "sqlite3_" followed by the DB file name
+ /// removing any path name. For example, for the DB file
+ /// /somewhere/in/the/system/bind10.sqlite3, this method will return
+ /// "sqlite3_bind10.sqlite3".
+ virtual const std::string& getDBName() const { return (database_name_); }
+
+private:
+ /// \brief Private database data
+ SQLite3Parameters* dbparameters_;
+ /// \brief The class for which the queries are done
+ const std::string class_;
+ /// \brief Opens the database
+ void open(const std::string& filename);
+ /// \brief Closes the database
+ void close();
+ const std::string database_name_;
+};
+
+}
+}
+
+#endif
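
For illustration, here is a minimal sketch of how a caller could drive the
accessor interface documented in this header (getZone() to map a zone name to
an internal id, searchForRecords() to start a per-name query, getNextRecord()
to pull rows until it returns false). The database file name and query names
below are hypothetical placeholders, not part of this patch:

    // Sketch only: drives the low-level accessor API declared above.
    #include <datasrc/sqlite3_accessor.h>
    #include <dns/name.h>
    #include <dns/rrclass.h>
    #include <iostream>
    #include <string>
    #include <utility>

    void dumpName() {
        using namespace isc::datasrc;
        // Hypothetical file; the accessor creates the schema if it is missing.
        SQLite3Database db("zones.sqlite3", isc::dns::RRClass::IN());

        // getZone() returns (found, zone_id).
        const std::pair<bool, int> zone =
            db.getZone(isc::dns::Name("example.com"));
        if (!zone.first) {
            return;                 // zone not served by this database
        }

        // Start a search for one name, then iterate until no more rows.
        std::string columns[DatabaseAccessor::COLUMN_COUNT];
        db.searchForRecords(zone.second, "www.example.com.");
        while (db.getNextRecord(columns, DatabaseAccessor::COLUMN_COUNT)) {
            // Columns are rdtype, ttl, sigtype, rdata (in that order).
            std::cout << columns[0] << " " << columns[1] << " "
                      << columns[3] << std::endl;
        }
    }
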
diff --git a/src/lib/datasrc/tests/Makefile.am b/src/lib/datasrc/tests/Makefile.am
index ffedb75..1a65f82 100644
--- a/src/lib/datasrc/tests/Makefile.am
+++ b/src/lib/datasrc/tests/Makefile.am
@@ -28,6 +28,8 @@ run_unittests_SOURCES += rbtree_unittest.cc
run_unittests_SOURCES += zonetable_unittest.cc
run_unittests_SOURCES += memory_datasrc_unittest.cc
run_unittests_SOURCES += logger_unittest.cc
+run_unittests_SOURCES += database_unittest.cc
+run_unittests_SOURCES += sqlite3_accessor_unittest.cc
run_unittests_CPPFLAGS = $(AM_CPPFLAGS) $(GTEST_INCLUDES)
run_unittests_LDFLAGS = $(AM_LDFLAGS) $(GTEST_LDFLAGS)
diff --git a/src/lib/datasrc/tests/database_unittest.cc b/src/lib/datasrc/tests/database_unittest.cc
new file mode 100644
index 0000000..8fad14b
--- /dev/null
+++ b/src/lib/datasrc/tests/database_unittest.cc
@@ -0,0 +1,943 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <gtest/gtest.h>
+
+#include <dns/name.h>
+#include <dns/rrttl.h>
+#include <dns/rrset.h>
+#include <exceptions/exceptions.h>
+
+#include <datasrc/database.h>
+#include <datasrc/zone.h>
+#include <datasrc/data_source.h>
+
+#include <testutils/dnsmessage_test.h>
+
+#include <map>
+
+using namespace isc::datasrc;
+using namespace std;
+using namespace boost;
+using isc::dns::Name;
+
+namespace {
+
+/*
+ * A fake database accessor that pretends it contains a single zone --
+ * example.org.
+ */
+class MockAccessor : public DatabaseAccessor {
+public:
+ MockAccessor() : search_running_(false),
+ database_name_("mock_database")
+ {
+ fillData();
+ }
+
+ virtual std::pair<bool, int> getZone(const Name& name) const {
+ if (name == Name("example.org")) {
+ return (std::pair<bool, int>(true, 42));
+ } else {
+ return (std::pair<bool, int>(false, 0));
+ }
+ }
+
+ virtual void searchForRecords(int zone_id, const std::string& name) {
+ search_running_ = true;
+
+ // Hardcoded names to trigger exceptions, for testing the error
+ // handling of find(); the getNextRecord() counterparts are below.
+ // If the name matches one of these, raise the corresponding
+ // exception right here in the search.
+ if (name == "dsexception.in.search.") {
+ isc_throw(DataSourceError, "datasource exception on search");
+ } else if (name == "iscexception.in.search.") {
+ isc_throw(isc::Exception, "isc exception on search");
+ } else if (name == "basicexception.in.search.") {
+ throw std::exception();
+ }
+ searched_name_ = name;
+
+ // we're not aiming for efficiency in this test, simply
+ // copy the relevant vector from records
+ cur_record = 0;
+ if (zone_id == 42) {
+ if (records.count(name) > 0) {
+ cur_name = records.find(name)->second;
+ } else {
+ cur_name.clear();
+ }
+ } else {
+ cur_name.clear();
+ }
+ };
+
+ virtual bool getNextRecord(std::string columns[], size_t column_count) {
+ if (searched_name_ == "dsexception.in.getnext.") {
+ isc_throw(DataSourceError, "datasource exception on getnextrecord");
+ } else if (searched_name_ == "iscexception.in.getnext.") {
+ isc_throw(isc::Exception, "isc exception on getnextrecord");
+ } else if (searched_name_ == "basicexception.in.getnext.") {
+ throw std::exception();
+ }
+
+ if (column_count != DatabaseAccessor::COLUMN_COUNT) {
+ isc_throw(DataSourceError, "Wrong column count in getNextRecord");
+ }
+ if (cur_record < cur_name.size()) {
+ for (size_t i = 0; i < column_count; ++i) {
+ columns[i] = cur_name[cur_record][i];
+ }
+ cur_record++;
+ return (true);
+ } else {
+ resetSearch();
+ return (false);
+ }
+ };
+
+ virtual void resetSearch() {
+ search_running_ = false;
+ };
+
+ bool searchRunning() const {
+ return (search_running_);
+ }
+
+ virtual const std::string& getDBName() const {
+ return (database_name_);
+ }
+private:
+ std::map<std::string, std::vector< std::vector<std::string> > > records;
+ // used as internal index for getNextRecord()
+ size_t cur_record;
+ // used as temporary storage after searchForRecords() and during
+ // getNextRecord() calls, as well as during the building of the
+ // fake data
+ std::vector< std::vector<std::string> > cur_name;
+
+ // This boolean is used to make sure find() calls resetSearch
+ // when it encounters an error
+ bool search_running_;
+
+ // We store the name passed to searchForRecords, so we can
+ // hardcode some exceptions into getNextRecord
+ std::string searched_name_;
+
+ const std::string database_name_;
+
+ // Adds one record to the current name in the database
+ // The actual data will not be added to 'records' until
+ // addCurName() is called
+ void addRecord(const std::string& name,
+ const std::string& type,
+ const std::string& sigtype,
+ const std::string& rdata) {
+ std::vector<std::string> columns;
+ columns.push_back(name);
+ columns.push_back(type);
+ columns.push_back(sigtype);
+ columns.push_back(rdata);
+ cur_name.push_back(columns);
+ }
+
+ // Adds all records we just built with calls to addRecord()
+ // to the actual fake database. This will clear cur_name,
+ // so we can immediately start adding new records.
+ void addCurName(const std::string& name) {
+ ASSERT_EQ(0, records.count(name));
+ records[name] = cur_name;
+ cur_name.clear();
+ }
+
+ // Fills the database with zone data.
+ // This method constructs a number of resource records (with addRecord),
+ // which will all be added for one domain name to the fake database
+ // (with addCurName). So for instance the first set of calls create
+ // data for the name 'www.example.org', which will consist of one A RRset
+ // of one record, and one AAAA RRset of two records.
+ // The order in which they are added is the order in which getNextRecord()
+ // will return them (so we can test whether find() etc. support data that
+ // might not come in 'normal' order)
+ // It shall immediately fail if you try to add the same name twice.
+ void fillData() {
+ // some plain data
+ addRecord("A", "3600", "", "192.0.2.1");
+ addRecord("AAAA", "3600", "", "2001:db8::1");
+ addRecord("AAAA", "3600", "", "2001:db8::2");
+ addCurName("www.example.org.");
+
+ addRecord("A", "3600", "", "192.0.2.1");
+ addRecord("AAAA", "3600", "", "2001:db8::1");
+ addRecord("A", "3600", "", "192.0.2.2");
+ addCurName("www2.example.org.");
+
+ addRecord("CNAME", "3600", "", "www.example.org.");
+ addCurName("cname.example.org.");
+
+ // some DNSSEC-'signed' data
+ addRecord("A", "3600", "", "192.0.2.1");
+ addRecord("RRSIG", "3600", "", "A 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE");
+ addRecord("RRSIG", "3600", "", "A 5 3 3600 20000101000000 20000201000000 12346 example.org. FAKEFAKEFAKE");
+ addRecord("AAAA", "3600", "", "2001:db8::1");
+ addRecord("AAAA", "3600", "", "2001:db8::2");
+ addRecord("RRSIG", "3600", "", "AAAA 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE");
+ addCurName("signed1.example.org.");
+ addRecord("CNAME", "3600", "", "www.example.org.");
+ addRecord("RRSIG", "3600", "", "CNAME 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE");
+ addCurName("signedcname1.example.org.");
+ // Special case that might fail: the sig is for a CNAME, which isn't there
+ // (it should be ignored). Ignoring a 'normal' other type is covered above by www.
+ addRecord("A", "3600", "", "192.0.2.1");
+ addRecord("RRSIG", "3600", "", "A 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE");
+ addRecord("RRSIG", "3600", "", "CNAME 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE");
+ addCurName("acnamesig1.example.org.");
+
+ // let's pretend we have a database that is not careful
+ // about the order in which it returns data
+ addRecord("RRSIG", "3600", "", "A 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE");
+ addRecord("AAAA", "3600", "", "2001:db8::2");
+ addRecord("RRSIG", "3600", "", "A 5 3 3600 20000101000000 20000201000000 12346 example.org. FAKEFAKEFAKE");
+ addRecord("A", "3600", "", "192.0.2.1");
+ addRecord("RRSIG", "3600", "", "AAAA 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE");
+ addRecord("AAAA", "3600", "", "2001:db8::1");
+ addCurName("signed2.example.org.");
+ addRecord("RRSIG", "3600", "", "CNAME 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE");
+ addRecord("CNAME", "3600", "", "www.example.org.");
+ addCurName("signedcname2.example.org.");
+
+ addRecord("RRSIG", "3600", "", "CNAME 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE");
+ addRecord("A", "3600", "", "192.0.2.1");
+ addRecord("RRSIG", "3600", "", "A 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE");
+ addCurName("acnamesig2.example.org.");
+
+ addRecord("RRSIG", "3600", "", "CNAME 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE");
+ addRecord("RRSIG", "3600", "", "A 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE");
+ addRecord("A", "3600", "", "192.0.2.1");
+ addCurName("acnamesig3.example.org.");
+
+ addRecord("A", "3600", "", "192.0.2.1");
+ addRecord("A", "360", "", "192.0.2.2");
+ addCurName("ttldiff1.example.org.");
+ addRecord("A", "360", "", "192.0.2.1");
+ addRecord("A", "3600", "", "192.0.2.2");
+ addCurName("ttldiff2.example.org.");
+
+ // also add some intentionally bad data
+ addRecord("A", "3600", "", "192.0.2.1");
+ addRecord("CNAME", "3600", "", "www.example.org.");
+ addCurName("badcname1.example.org.");
+
+ addRecord("CNAME", "3600", "", "www.example.org.");
+ addRecord("A", "3600", "", "192.0.2.1");
+ addCurName("badcname2.example.org.");
+
+ addRecord("CNAME", "3600", "", "www.example.org.");
+ addRecord("CNAME", "3600", "", "www.example2.org.");
+ addCurName("badcname3.example.org.");
+
+ addRecord("A", "3600", "", "bad");
+ addCurName("badrdata.example.org.");
+
+ addRecord("BAD_TYPE", "3600", "", "192.0.2.1");
+ addCurName("badtype.example.org.");
+
+ addRecord("A", "badttl", "", "192.0.2.1");
+ addCurName("badttl.example.org.");
+
+ addRecord("A", "badttl", "", "192.0.2.1");
+ addRecord("RRSIG", "3600", "", "A 5 3 3600 somebaddata 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE");
+ addCurName("badsig.example.org.");
+
+ addRecord("A", "3600", "", "192.0.2.1");
+ addRecord("RRSIG", "3600", "TXT", "A 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE");
+ addCurName("badsigtype.example.org.");
+
+ // Data for testing delegation (with NS and DNAME)
+ addRecord("NS", "3600", "", "ns.example.com.");
+ addRecord("NS", "3600", "", "ns.delegation.example.org.");
+ addRecord("RRSIG", "3600", "", "NS 5 3 3600 20000101000000 "
+ "20000201000000 12345 example.org. FAKEFAKEFAKE");
+ addCurName("delegation.example.org.");
+ addRecord("A", "3600", "", "192.0.2.1");
+ addCurName("ns.delegation.example.org.");
+ addRecord("A", "3600", "", "192.0.2.1");
+ addCurName("deep.below.delegation.example.org.");
+
+ addRecord("A", "3600", "", "192.0.2.1");
+ addRecord("DNAME", "3600", "", "dname.example.com.");
+ addRecord("RRSIG", "3600", "", "DNAME 5 3 3600 20000101000000 "
+ "20000201000000 12345 example.org. FAKEFAKEFAKE");
+ addCurName("dname.example.org.");
+ addRecord("A", "3600", "", "192.0.2.1");
+ addCurName("below.dname.example.org.");
+
+ // Broken NS
+ addRecord("A", "3600", "", "192.0.2.1");
+ addRecord("NS", "3600", "", "ns.example.com.");
+ addCurName("brokenns1.example.org.");
+ addRecord("NS", "3600", "", "ns.example.com.");
+ addRecord("A", "3600", "", "192.0.2.1");
+ addCurName("brokenns2.example.org.");
+
+ // Now double DNAME, to test failure mode
+ addRecord("DNAME", "3600", "", "dname1.example.com.");
+ addRecord("DNAME", "3600", "", "dname2.example.com.");
+ addCurName("baddname.example.org.");
+
+ // Put some data into apex (including NS) so we can check our NS
+ // doesn't break anything
+ addRecord("NS", "3600", "", "ns.example.com.");
+ addRecord("A", "3600", "", "192.0.2.1");
+ addRecord("RRSIG", "3600", "", "NS 5 3 3600 20000101000000 "
+ "20000201000000 12345 example.org. FAKEFAKEFAKE");
+ addCurName("example.org.");
+ }
+};
+
+class DatabaseClientTest : public ::testing::Test {
+public:
+ DatabaseClientTest() {
+ createClient();
+ }
+ /*
+ * We initialize the client from a function, so we can call it multiple
+ * times per test.
+ */
+ void createClient() {
+ current_database_ = new MockAccessor();
+ client_.reset(new DatabaseClient(shared_ptr<DatabaseAccessor>(
+ current_database_)));
+ }
+ // Will be deleted by client_, just keep the current value for comparison.
+ MockAccessor* current_database_;
+ shared_ptr<DatabaseClient> client_;
+ const std::string database_name_;
+
+ /**
+ * Check the zone finder is a valid one and references the zone ID and
+ * database available here.
+ */
+ void checkZoneFinder(const DataSourceClient::FindResult& zone) {
+ ASSERT_NE(ZoneFinderPtr(), zone.zone_finder) << "No zone finder";
+ shared_ptr<DatabaseClient::Finder> finder(
+ dynamic_pointer_cast<DatabaseClient::Finder>(zone.zone_finder));
+ ASSERT_NE(shared_ptr<DatabaseClient::Finder>(), finder) <<
+ "Wrong type of finder";
+ EXPECT_EQ(42, finder->zone_id());
+ EXPECT_EQ(current_database_, &finder->database());
+ }
+
+ shared_ptr<DatabaseClient::Finder> getFinder() {
+ DataSourceClient::FindResult zone(
+ client_->findZone(Name("example.org")));
+ EXPECT_EQ(result::SUCCESS, zone.code);
+ shared_ptr<DatabaseClient::Finder> finder(
+ dynamic_pointer_cast<DatabaseClient::Finder>(zone.zone_finder));
+ EXPECT_EQ(42, finder->zone_id());
+ EXPECT_FALSE(current_database_->searchRunning());
+
+ return (finder);
+ }
+
+ std::vector<std::string> expected_rdatas_;
+ std::vector<std::string> expected_sig_rdatas_;
+};
+
+TEST_F(DatabaseClientTest, zoneNotFound) {
+ DataSourceClient::FindResult zone(client_->findZone(Name("example.com")));
+ EXPECT_EQ(result::NOTFOUND, zone.code);
+}
+
+TEST_F(DatabaseClientTest, exactZone) {
+ DataSourceClient::FindResult zone(client_->findZone(Name("example.org")));
+ EXPECT_EQ(result::SUCCESS, zone.code);
+ checkZoneFinder(zone);
+}
+
+TEST_F(DatabaseClientTest, superZone) {
+ DataSourceClient::FindResult zone(client_->findZone(Name(
+ "sub.example.org")));
+ EXPECT_EQ(result::PARTIALMATCH, zone.code);
+ checkZoneFinder(zone);
+}
+
+TEST_F(DatabaseClientTest, noAccessorException) {
+ // We need a dummy variable here; some compilers would regard it as a mere
+ // declaration instead of an instantiation and make the test fail.
+ EXPECT_THROW(DatabaseClient dummy((shared_ptr<DatabaseAccessor>())),
+ isc::InvalidParameter);
+}
+
+namespace {
+// checks if the given rrset matches the
+// given name, class, type and rdatas
+void
+checkRRset(isc::dns::ConstRRsetPtr rrset,
+ const isc::dns::Name& name,
+ const isc::dns::RRClass& rrclass,
+ const isc::dns::RRType& rrtype,
+ const isc::dns::RRTTL& rrttl,
+ const std::vector<std::string>& rdatas) {
+ isc::dns::RRsetPtr expected_rrset(
+ new isc::dns::RRset(name, rrclass, rrtype, rrttl));
+ for (unsigned int i = 0; i < rdatas.size(); ++i) {
+ expected_rrset->addRdata(
+ isc::dns::rdata::createRdata(rrtype, rrclass,
+ rdatas[i]));
+ }
+ isc::testutils::rrsetCheck(expected_rrset, rrset);
+}
+
+void
+doFindTest(shared_ptr<DatabaseClient::Finder> finder,
+ const isc::dns::Name& name,
+ const isc::dns::RRType& type,
+ const isc::dns::RRType& expected_type,
+ const isc::dns::RRTTL expected_ttl,
+ ZoneFinder::Result expected_result,
+ const std::vector<std::string>& expected_rdatas,
+ const std::vector<std::string>& expected_sig_rdatas,
+ const isc::dns::Name& expected_name = isc::dns::Name::ROOT_NAME(),
+ const ZoneFinder::FindOptions options = ZoneFinder::FIND_DEFAULT)
+{
+ SCOPED_TRACE("doFindTest " + name.toText() + " " + type.toText());
+ ZoneFinder::FindResult result =
+ finder->find(name, type, NULL, options);
+ ASSERT_EQ(expected_result, result.code) << name << " " << type;
+ if (expected_rdatas.size() > 0) {
+ checkRRset(result.rrset, expected_name != Name(".") ? expected_name :
+ name, finder->getClass(), expected_type, expected_ttl,
+ expected_rdatas);
+
+ if (expected_sig_rdatas.size() > 0) {
+ checkRRset(result.rrset->getRRsig(), expected_name != Name(".") ?
+ expected_name : name, finder->getClass(),
+ isc::dns::RRType::RRSIG(), expected_ttl,
+ expected_sig_rdatas);
+ } else {
+ EXPECT_EQ(isc::dns::RRsetPtr(), result.rrset->getRRsig());
+ }
+ } else {
+ EXPECT_EQ(isc::dns::RRsetPtr(), result.rrset);
+ }
+}
+} // end anonymous namespace
+
+TEST_F(DatabaseClientTest, find) {
+ shared_ptr<DatabaseClient::Finder> finder(getFinder());
+
+ expected_rdatas_.clear();
+ expected_sig_rdatas_.clear();
+ expected_rdatas_.push_back("192.0.2.1");
+ doFindTest(finder, isc::dns::Name("www.example.org."),
+ isc::dns::RRType::A(), isc::dns::RRType::A(),
+ isc::dns::RRTTL(3600),
+ ZoneFinder::SUCCESS,
+ expected_rdatas_, expected_sig_rdatas_);
+ EXPECT_FALSE(current_database_->searchRunning());
+
+ expected_rdatas_.clear();
+ expected_sig_rdatas_.clear();
+ expected_rdatas_.push_back("192.0.2.1");
+ expected_rdatas_.push_back("192.0.2.2");
+ doFindTest(finder, isc::dns::Name("www2.example.org."),
+ isc::dns::RRType::A(), isc::dns::RRType::A(),
+ isc::dns::RRTTL(3600),
+ ZoneFinder::SUCCESS,
+ expected_rdatas_, expected_sig_rdatas_);
+ EXPECT_FALSE(current_database_->searchRunning());
+
+ expected_rdatas_.clear();
+ expected_sig_rdatas_.clear();
+ expected_rdatas_.push_back("2001:db8::1");
+ expected_rdatas_.push_back("2001:db8::2");
+ doFindTest(finder, isc::dns::Name("www.example.org."),
+ isc::dns::RRType::AAAA(), isc::dns::RRType::AAAA(),
+ isc::dns::RRTTL(3600),
+ ZoneFinder::SUCCESS,
+ expected_rdatas_, expected_sig_rdatas_);
+ EXPECT_FALSE(current_database_->searchRunning());
+
+ expected_rdatas_.clear();
+ expected_sig_rdatas_.clear();
+ doFindTest(finder, isc::dns::Name("www.example.org."),
+ isc::dns::RRType::TXT(), isc::dns::RRType::TXT(),
+ isc::dns::RRTTL(3600),
+ ZoneFinder::NXRRSET,
+ expected_rdatas_, expected_sig_rdatas_);
+ EXPECT_FALSE(current_database_->searchRunning());
+
+ expected_rdatas_.clear();
+ expected_sig_rdatas_.clear();
+ expected_rdatas_.push_back("www.example.org.");
+ doFindTest(finder, isc::dns::Name("cname.example.org."),
+ isc::dns::RRType::A(), isc::dns::RRType::CNAME(),
+ isc::dns::RRTTL(3600),
+ ZoneFinder::CNAME,
+ expected_rdatas_, expected_sig_rdatas_);
+ EXPECT_FALSE(current_database_->searchRunning());
+
+ expected_rdatas_.clear();
+ expected_sig_rdatas_.clear();
+ expected_rdatas_.push_back("www.example.org.");
+ doFindTest(finder, isc::dns::Name("cname.example.org."),
+ isc::dns::RRType::CNAME(), isc::dns::RRType::CNAME(),
+ isc::dns::RRTTL(3600),
+ ZoneFinder::SUCCESS,
+ expected_rdatas_, expected_sig_rdatas_);
+ EXPECT_FALSE(current_database_->searchRunning());
+
+ expected_rdatas_.clear();
+ expected_sig_rdatas_.clear();
+ doFindTest(finder, isc::dns::Name("doesnotexist.example.org."),
+ isc::dns::RRType::A(), isc::dns::RRType::A(),
+ isc::dns::RRTTL(3600),
+ ZoneFinder::NXDOMAIN,
+ expected_rdatas_, expected_sig_rdatas_);
+ EXPECT_FALSE(current_database_->searchRunning());
+
+ expected_rdatas_.clear();
+ expected_sig_rdatas_.clear();
+ expected_rdatas_.push_back("192.0.2.1");
+ expected_sig_rdatas_.push_back("A 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE");
+ expected_sig_rdatas_.push_back("A 5 3 3600 20000101000000 20000201000000 12346 example.org. FAKEFAKEFAKE");
+ doFindTest(finder, isc::dns::Name("signed1.example.org."),
+ isc::dns::RRType::A(), isc::dns::RRType::A(),
+ isc::dns::RRTTL(3600),
+ ZoneFinder::SUCCESS,
+ expected_rdatas_, expected_sig_rdatas_);
+ EXPECT_FALSE(current_database_->searchRunning());
+
+ expected_rdatas_.clear();
+ expected_sig_rdatas_.clear();
+ expected_rdatas_.push_back("2001:db8::1");
+ expected_rdatas_.push_back("2001:db8::2");
+ expected_sig_rdatas_.push_back("AAAA 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE");
+ doFindTest(finder, isc::dns::Name("signed1.example.org."),
+ isc::dns::RRType::AAAA(), isc::dns::RRType::AAAA(),
+ isc::dns::RRTTL(3600),
+ ZoneFinder::SUCCESS,
+ expected_rdatas_, expected_sig_rdatas_);
+ EXPECT_FALSE(current_database_->searchRunning());
+
+ expected_rdatas_.clear();
+ expected_sig_rdatas_.clear();
+ doFindTest(finder, isc::dns::Name("signed1.example.org."),
+ isc::dns::RRType::TXT(), isc::dns::RRType::TXT(),
+ isc::dns::RRTTL(3600),
+ ZoneFinder::NXRRSET,
+ expected_rdatas_, expected_sig_rdatas_);
+ EXPECT_FALSE(current_database_->searchRunning());
+
+ expected_rdatas_.clear();
+ expected_sig_rdatas_.clear();
+ expected_rdatas_.push_back("www.example.org.");
+ expected_sig_rdatas_.push_back("CNAME 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE");
+ doFindTest(finder, isc::dns::Name("signedcname1.example.org."),
+ isc::dns::RRType::A(), isc::dns::RRType::CNAME(),
+ isc::dns::RRTTL(3600),
+ ZoneFinder::CNAME,
+ expected_rdatas_, expected_sig_rdatas_);
+ EXPECT_FALSE(current_database_->searchRunning());
+
+ expected_rdatas_.clear();
+ expected_sig_rdatas_.clear();
+ expected_rdatas_.push_back("192.0.2.1");
+ expected_sig_rdatas_.push_back("A 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE");
+ expected_sig_rdatas_.push_back("A 5 3 3600 20000101000000 20000201000000 12346 example.org. FAKEFAKEFAKE");
+ doFindTest(finder, isc::dns::Name("signed2.example.org."),
+ isc::dns::RRType::A(), isc::dns::RRType::A(),
+ isc::dns::RRTTL(3600),
+ ZoneFinder::SUCCESS,
+ expected_rdatas_, expected_sig_rdatas_);
+ EXPECT_FALSE(current_database_->searchRunning());
+
+ expected_rdatas_.clear();
+ expected_sig_rdatas_.clear();
+ expected_rdatas_.push_back("2001:db8::2");
+ expected_rdatas_.push_back("2001:db8::1");
+ expected_sig_rdatas_.push_back("AAAA 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE");
+ doFindTest(finder, isc::dns::Name("signed2.example.org."),
+ isc::dns::RRType::AAAA(), isc::dns::RRType::AAAA(),
+ isc::dns::RRTTL(3600),
+ ZoneFinder::SUCCESS,
+ expected_rdatas_, expected_sig_rdatas_);
+ EXPECT_FALSE(current_database_->searchRunning());
+
+ expected_rdatas_.clear();
+ expected_sig_rdatas_.clear();
+ doFindTest(finder, isc::dns::Name("signed2.example.org."),
+ isc::dns::RRType::TXT(), isc::dns::RRType::TXT(),
+ isc::dns::RRTTL(3600),
+ ZoneFinder::NXRRSET,
+ expected_rdatas_, expected_sig_rdatas_);
+ EXPECT_FALSE(current_database_->searchRunning());
+
+ expected_rdatas_.clear();
+ expected_sig_rdatas_.clear();
+ expected_rdatas_.push_back("www.example.org.");
+ expected_sig_rdatas_.push_back("CNAME 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE");
+ doFindTest(finder, isc::dns::Name("signedcname2.example.org."),
+ isc::dns::RRType::A(), isc::dns::RRType::CNAME(),
+ isc::dns::RRTTL(3600),
+ ZoneFinder::CNAME,
+ expected_rdatas_, expected_sig_rdatas_);
+ EXPECT_FALSE(current_database_->searchRunning());
+
+
+ expected_rdatas_.clear();
+ expected_sig_rdatas_.clear();
+ expected_rdatas_.push_back("192.0.2.1");
+ expected_sig_rdatas_.push_back("A 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE");
+ doFindTest(finder, isc::dns::Name("acnamesig1.example.org."),
+ isc::dns::RRType::A(), isc::dns::RRType::A(),
+ isc::dns::RRTTL(3600),
+ ZoneFinder::SUCCESS,
+ expected_rdatas_, expected_sig_rdatas_);
+ EXPECT_FALSE(current_database_->searchRunning());
+
+ expected_rdatas_.clear();
+ expected_sig_rdatas_.clear();
+ expected_rdatas_.push_back("192.0.2.1");
+ expected_sig_rdatas_.push_back("A 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE");
+ doFindTest(finder, isc::dns::Name("acnamesig2.example.org."),
+ isc::dns::RRType::A(), isc::dns::RRType::A(),
+ isc::dns::RRTTL(3600),
+ ZoneFinder::SUCCESS,
+ expected_rdatas_, expected_sig_rdatas_);
+ EXPECT_FALSE(current_database_->searchRunning());
+
+ expected_rdatas_.clear();
+ expected_sig_rdatas_.clear();
+ expected_rdatas_.push_back("192.0.2.1");
+ expected_sig_rdatas_.push_back("A 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE");
+ doFindTest(finder, isc::dns::Name("acnamesig3.example.org."),
+ isc::dns::RRType::A(), isc::dns::RRType::A(),
+ isc::dns::RRTTL(3600),
+ ZoneFinder::SUCCESS,
+ expected_rdatas_, expected_sig_rdatas_);
+ EXPECT_FALSE(current_database_->searchRunning());
+
+ expected_rdatas_.clear();
+ expected_sig_rdatas_.clear();
+ expected_rdatas_.push_back("192.0.2.1");
+ expected_rdatas_.push_back("192.0.2.2");
+ doFindTest(finder, isc::dns::Name("ttldiff1.example.org."),
+ isc::dns::RRType::A(), isc::dns::RRType::A(),
+ isc::dns::RRTTL(360),
+ ZoneFinder::SUCCESS,
+ expected_rdatas_, expected_sig_rdatas_);
+ EXPECT_FALSE(current_database_->searchRunning());
+
+ expected_rdatas_.clear();
+ expected_sig_rdatas_.clear();
+ expected_rdatas_.push_back("192.0.2.1");
+ expected_rdatas_.push_back("192.0.2.2");
+ doFindTest(finder, isc::dns::Name("ttldiff2.example.org."),
+ isc::dns::RRType::A(), isc::dns::RRType::A(),
+ isc::dns::RRTTL(360),
+ ZoneFinder::SUCCESS,
+ expected_rdatas_, expected_sig_rdatas_);
+ EXPECT_FALSE(current_database_->searchRunning());
+
+
+ EXPECT_THROW(finder->find(isc::dns::Name("badcname1.example.org."),
+ isc::dns::RRType::A(),
+ NULL, ZoneFinder::FIND_DEFAULT),
+ DataSourceError);
+ EXPECT_FALSE(current_database_->searchRunning());
+ EXPECT_THROW(finder->find(isc::dns::Name("badcname2.example.org."),
+ isc::dns::RRType::A(),
+ NULL, ZoneFinder::FIND_DEFAULT),
+ DataSourceError);
+ EXPECT_FALSE(current_database_->searchRunning());
+ EXPECT_THROW(finder->find(isc::dns::Name("badcname3.example.org."),
+ isc::dns::RRType::A(),
+ NULL, ZoneFinder::FIND_DEFAULT),
+ DataSourceError);
+ EXPECT_FALSE(current_database_->searchRunning());
+ EXPECT_THROW(finder->find(isc::dns::Name("badrdata.example.org."),
+ isc::dns::RRType::A(),
+ NULL, ZoneFinder::FIND_DEFAULT),
+ DataSourceError);
+ EXPECT_FALSE(current_database_->searchRunning());
+ EXPECT_THROW(finder->find(isc::dns::Name("badtype.example.org."),
+ isc::dns::RRType::A(),
+ NULL, ZoneFinder::FIND_DEFAULT),
+ DataSourceError);
+ EXPECT_FALSE(current_database_->searchRunning());
+ EXPECT_THROW(finder->find(isc::dns::Name("badttl.example.org."),
+ isc::dns::RRType::A(),
+ NULL, ZoneFinder::FIND_DEFAULT),
+ DataSourceError);
+ EXPECT_FALSE(current_database_->searchRunning());
+ EXPECT_THROW(finder->find(isc::dns::Name("badsig.example.org."),
+ isc::dns::RRType::A(),
+ NULL, ZoneFinder::FIND_DEFAULT),
+ DataSourceError);
+ EXPECT_FALSE(current_database_->searchRunning());
+
+ // Trigger the hardcoded exceptions and see if find() has cleaned up
+ EXPECT_THROW(finder->find(isc::dns::Name("dsexception.in.search."),
+ isc::dns::RRType::A(),
+ NULL, ZoneFinder::FIND_DEFAULT),
+ DataSourceError);
+ EXPECT_FALSE(current_database_->searchRunning());
+ EXPECT_THROW(finder->find(isc::dns::Name("iscexception.in.search."),
+ isc::dns::RRType::A(),
+ NULL, ZoneFinder::FIND_DEFAULT),
+ DataSourceError);
+ EXPECT_FALSE(current_database_->searchRunning());
+ EXPECT_THROW(finder->find(isc::dns::Name("basicexception.in.search."),
+ isc::dns::RRType::A(),
+ NULL, ZoneFinder::FIND_DEFAULT),
+ std::exception);
+ EXPECT_FALSE(current_database_->searchRunning());
+
+ EXPECT_THROW(finder->find(isc::dns::Name("dsexception.in.getnext."),
+ isc::dns::RRType::A(),
+ NULL, ZoneFinder::FIND_DEFAULT),
+ DataSourceError);
+ EXPECT_FALSE(current_database_->searchRunning());
+ EXPECT_THROW(finder->find(isc::dns::Name("iscexception.in.getnext."),
+ isc::dns::RRType::A(),
+ NULL, ZoneFinder::FIND_DEFAULT),
+ DataSourceError);
+ EXPECT_FALSE(current_database_->searchRunning());
+ EXPECT_THROW(finder->find(isc::dns::Name("basicexception.in.getnext."),
+ isc::dns::RRType::A(),
+ NULL, ZoneFinder::FIND_DEFAULT),
+ std::exception);
+ EXPECT_FALSE(current_database_->searchRunning());
+
+ // This RRSIG has the wrong sigtype field, which should be
+ // an error if we decide to keep using that field
+ // Right now the field is ignored, so it does not error
+ expected_rdatas_.clear();
+ expected_sig_rdatas_.clear();
+ expected_rdatas_.push_back("192.0.2.1");
+ expected_sig_rdatas_.push_back("A 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE");
+ doFindTest(finder, isc::dns::Name("badsigtype.example.org."),
+ isc::dns::RRType::A(), isc::dns::RRType::A(),
+ isc::dns::RRTTL(3600),
+ ZoneFinder::SUCCESS,
+ expected_rdatas_, expected_sig_rdatas_);
+ EXPECT_FALSE(current_database_->searchRunning());
+}
+
+TEST_F(DatabaseClientTest, findDelegation) {
+ shared_ptr<DatabaseClient::Finder> finder(getFinder());
+
+ // The apex should not be considered a delegation point, and we can
+ // access its data
+ expected_rdatas_.clear();
+ expected_sig_rdatas_.clear();
+ expected_rdatas_.push_back("192.0.2.1");
+ doFindTest(finder, isc::dns::Name("example.org."),
+ isc::dns::RRType::A(), isc::dns::RRType::A(),
+ isc::dns::RRTTL(3600), ZoneFinder::SUCCESS, expected_rdatas_,
+ expected_sig_rdatas_);
+ EXPECT_FALSE(current_database_->searchRunning());
+
+ expected_rdatas_.clear();
+ expected_rdatas_.push_back("ns.example.com.");
+ expected_sig_rdatas_.push_back("NS 5 3 3600 20000101000000 20000201000000 "
+ "12345 example.org. FAKEFAKEFAKE");
+ doFindTest(finder, isc::dns::Name("example.org."),
+ isc::dns::RRType::NS(), isc::dns::RRType::NS(),
+ isc::dns::RRTTL(3600), ZoneFinder::SUCCESS, expected_rdatas_,
+ expected_sig_rdatas_);
+ EXPECT_FALSE(current_database_->searchRunning());
+
+ // Check that when we ask for something below the delegation point, we
+ // get the NS (both when the requested RRset exists there and when it doesn't)
+ expected_rdatas_.clear();
+ expected_sig_rdatas_.clear();
+ expected_rdatas_.push_back("ns.example.com.");
+ expected_rdatas_.push_back("ns.delegation.example.org.");
+ expected_sig_rdatas_.push_back("NS 5 3 3600 20000101000000 20000201000000 "
+ "12345 example.org. FAKEFAKEFAKE");
+ doFindTest(finder, isc::dns::Name("ns.delegation.example.org."),
+ isc::dns::RRType::A(), isc::dns::RRType::NS(),
+ isc::dns::RRTTL(3600), ZoneFinder::DELEGATION, expected_rdatas_,
+ expected_sig_rdatas_,
+ isc::dns::Name("delegation.example.org."));
+ EXPECT_FALSE(current_database_->searchRunning());
+ doFindTest(finder, isc::dns::Name("ns.delegation.example.org."),
+ isc::dns::RRType::AAAA(), isc::dns::RRType::NS(),
+ isc::dns::RRTTL(3600), ZoneFinder::DELEGATION, expected_rdatas_,
+ expected_sig_rdatas_,
+ isc::dns::Name("delegation.example.org."));
+ doFindTest(finder, isc::dns::Name("deep.below.delegation.example.org."),
+ isc::dns::RRType::AAAA(), isc::dns::RRType::NS(),
+ isc::dns::RRTTL(3600), ZoneFinder::DELEGATION, expected_rdatas_,
+ expected_sig_rdatas_,
+ isc::dns::Name("delegation.example.org."));
+ EXPECT_FALSE(current_database_->searchRunning());
+
+ // Even when we check directly at the delegation point, we should get
+ // the NS
+ doFindTest(finder, isc::dns::Name("delegation.example.org."),
+ isc::dns::RRType::AAAA(), isc::dns::RRType::NS(),
+ isc::dns::RRTTL(3600), ZoneFinder::DELEGATION, expected_rdatas_,
+ expected_sig_rdatas_);
+ EXPECT_FALSE(current_database_->searchRunning());
+
+ // And when we ask directly for the NS, we should still get delegation
+ doFindTest(finder, isc::dns::Name("delegation.example.org."),
+ isc::dns::RRType::NS(), isc::dns::RRType::NS(),
+ isc::dns::RRTTL(3600), ZoneFinder::DELEGATION, expected_rdatas_,
+ expected_sig_rdatas_);
+ EXPECT_FALSE(current_database_->searchRunning());
+
+ // Now test DNAME. If the query name is below the DNAME, we should get
+ // the DNAME (a zone with other data under a DNAME is invalid, but we test
+ // the behaviour anyway just to make sure)
+ expected_rdatas_.clear();
+ expected_rdatas_.push_back("dname.example.com.");
+ expected_sig_rdatas_.clear();
+ expected_sig_rdatas_.push_back("DNAME 5 3 3600 20000101000000 "
+ "20000201000000 12345 example.org. "
+ "FAKEFAKEFAKE");
+ doFindTest(finder, isc::dns::Name("below.dname.example.org."),
+ isc::dns::RRType::A(), isc::dns::RRType::DNAME(),
+ isc::dns::RRTTL(3600), ZoneFinder::DNAME, expected_rdatas_,
+ expected_sig_rdatas_, isc::dns::Name("dname.example.org."));
+ EXPECT_FALSE(current_database_->searchRunning());
+ doFindTest(finder, isc::dns::Name("below.dname.example.org."),
+ isc::dns::RRType::AAAA(), isc::dns::RRType::DNAME(),
+ isc::dns::RRTTL(3600), ZoneFinder::DNAME, expected_rdatas_,
+ expected_sig_rdatas_, isc::dns::Name("dname.example.org."));
+ EXPECT_FALSE(current_database_->searchRunning());
+ doFindTest(finder, isc::dns::Name("really.deep.below.dname.example.org."),
+ isc::dns::RRType::AAAA(), isc::dns::RRType::DNAME(),
+ isc::dns::RRTTL(3600), ZoneFinder::DNAME, expected_rdatas_,
+ expected_sig_rdatas_, isc::dns::Name("dname.example.org."));
+ EXPECT_FALSE(current_database_->searchRunning());
+
+ // Asking directly for the DNAME should give SUCCESS
+ doFindTest(finder, isc::dns::Name("dname.example.org."),
+ isc::dns::RRType::DNAME(), isc::dns::RRType::DNAME(),
+ isc::dns::RRTTL(3600), ZoneFinder::SUCCESS, expected_rdatas_,
+ expected_sig_rdatas_);
+
+ // But we don't delegate at DNAME point
+ expected_rdatas_.clear();
+ expected_rdatas_.push_back("192.0.2.1");
+ expected_sig_rdatas_.clear();
+ doFindTest(finder, isc::dns::Name("dname.example.org."),
+ isc::dns::RRType::A(), isc::dns::RRType::A(),
+ isc::dns::RRTTL(3600), ZoneFinder::SUCCESS, expected_rdatas_,
+ expected_sig_rdatas_);
+ EXPECT_FALSE(current_database_->searchRunning());
+ expected_rdatas_.clear();
+ doFindTest(finder, isc::dns::Name("dname.example.org."),
+ isc::dns::RRType::AAAA(), isc::dns::RRType::AAAA(),
+ isc::dns::RRTTL(3600), ZoneFinder::NXRRSET, expected_rdatas_,
+ expected_sig_rdatas_);
+ EXPECT_FALSE(current_database_->searchRunning());
+
+ // This is a broken DNAME; it contains two targets
+ EXPECT_THROW(finder->find(isc::dns::Name("below.baddname.example.org."),
+ isc::dns::RRType::A(), NULL,
+ ZoneFinder::FIND_DEFAULT),
+ DataSourceError);
+ EXPECT_FALSE(current_database_->searchRunning());
+
+ // Broken NS - it lives together with something else
+ EXPECT_FALSE(current_database_->searchRunning());
+ EXPECT_THROW(finder->find(isc::dns::Name("brokenns1.example.org."),
+ isc::dns::RRType::A(), NULL,
+ ZoneFinder::FIND_DEFAULT),
+ DataSourceError);
+ EXPECT_FALSE(current_database_->searchRunning());
+ EXPECT_THROW(finder->find(isc::dns::Name("brokenns2.example.org."),
+ isc::dns::RRType::A(), NULL,
+ ZoneFinder::FIND_DEFAULT),
+ DataSourceError);
+ EXPECT_FALSE(current_database_->searchRunning());
+}
+
+// Glue-OK mode. Just go down through NS delegations (but not through
+// DNAME) and pretend the delegation is not there.
+TEST_F(DatabaseClientTest, glueOK) {
+ shared_ptr<DatabaseClient::Finder> finder(getFinder());
+
+ expected_rdatas_.clear();
+ expected_sig_rdatas_.clear();
+ doFindTest(finder, isc::dns::Name("ns.delegation.example.org."),
+ isc::dns::RRType::AAAA(), isc::dns::RRType::AAAA(),
+ isc::dns::RRTTL(3600), ZoneFinder::NXRRSET,
+ expected_rdatas_, expected_sig_rdatas_,
+ isc::dns::Name("ns.delegation.example.org."),
+ ZoneFinder::FIND_GLUE_OK);
+ doFindTest(finder, isc::dns::Name("nothere.delegation.example.org."),
+ isc::dns::RRType::AAAA(), isc::dns::RRType::AAAA(),
+ isc::dns::RRTTL(3600), ZoneFinder::NXDOMAIN,
+ expected_rdatas_, expected_sig_rdatas_,
+ isc::dns::Name("nothere.delegation.example.org."),
+ ZoneFinder::FIND_GLUE_OK);
+ expected_rdatas_.push_back("192.0.2.1");
+ doFindTest(finder, isc::dns::Name("ns.delegation.example.org."),
+ isc::dns::RRType::A(), isc::dns::RRType::A(),
+ isc::dns::RRTTL(3600), ZoneFinder::SUCCESS,
+ expected_rdatas_, expected_sig_rdatas_,
+ isc::dns::Name("ns.delegation.example.org."),
+ ZoneFinder::FIND_GLUE_OK);
+ expected_rdatas_.clear();
+ expected_rdatas_.push_back("ns.example.com.");
+ expected_rdatas_.push_back("ns.delegation.example.org.");
+ expected_sig_rdatas_.clear();
+ expected_sig_rdatas_.push_back("NS 5 3 3600 20000101000000 "
+ "20000201000000 12345 example.org. "
+ "FAKEFAKEFAKE");
+ // When we request the NS, it should be SUCCESS, not DELEGATION
+ // (different in GLUE_OK)
+ doFindTest(finder, isc::dns::Name("delegation.example.org."),
+ isc::dns::RRType::NS(), isc::dns::RRType::NS(),
+ isc::dns::RRTTL(3600), ZoneFinder::SUCCESS,
+ expected_rdatas_, expected_sig_rdatas_,
+ isc::dns::Name("delegation.example.org."),
+ ZoneFinder::FIND_GLUE_OK);
+ expected_rdatas_.clear();
+ expected_rdatas_.push_back("dname.example.com.");
+ expected_sig_rdatas_.clear();
+ expected_sig_rdatas_.push_back("DNAME 5 3 3600 20000101000000 "
+ "20000201000000 12345 example.org. "
+ "FAKEFAKEFAKE");
+ doFindTest(finder, isc::dns::Name("below.dname.example.org."),
+ isc::dns::RRType::A(), isc::dns::RRType::DNAME(),
+ isc::dns::RRTTL(3600), ZoneFinder::DNAME, expected_rdatas_,
+ expected_sig_rdatas_, isc::dns::Name("dname.example.org."),
+ ZoneFinder::FIND_GLUE_OK);
+ EXPECT_FALSE(current_database_->searchRunning());
+ doFindTest(finder, isc::dns::Name("below.dname.example.org."),
+ isc::dns::RRType::AAAA(), isc::dns::RRType::DNAME(),
+ isc::dns::RRTTL(3600), ZoneFinder::DNAME, expected_rdatas_,
+ expected_sig_rdatas_, isc::dns::Name("dname.example.org."),
+ ZoneFinder::FIND_GLUE_OK);
+ EXPECT_FALSE(current_database_->searchRunning());
+}
+
+TEST_F(DatabaseClientTest, getOrigin) {
+ DataSourceClient::FindResult zone(client_->findZone(Name("example.org")));
+ ASSERT_EQ(result::SUCCESS, zone.code);
+ shared_ptr<DatabaseClient::Finder> finder(
+ dynamic_pointer_cast<DatabaseClient::Finder>(zone.zone_finder));
+ EXPECT_EQ(42, finder->zone_id());
+ EXPECT_EQ(isc::dns::Name("example.org"), finder->getOrigin());
+}
+
+}
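
The tests above drive DatabaseClient through MockAccessor; the same client
wiring works with the real SQLite3 accessor from this branch. A rough sketch,
assuming a hypothetical database path and query name (neither is defined by
this patch):

    // Sketch only: plugging SQLite3Database into DatabaseClient, the same
    // way the tests construct the client around MockAccessor.
    #include <datasrc/database.h>
    #include <datasrc/sqlite3_accessor.h>
    #include <datasrc/zone.h>
    #include <datasrc/data_source.h>
    #include <dns/name.h>
    #include <dns/rrclass.h>
    #include <dns/rrtype.h>
    #include <exceptions/exceptions.h>
    #include <boost/shared_ptr.hpp>

    using namespace isc::datasrc;
    using namespace isc::dns;

    ZoneFinder::FindResult
    lookupA(const std::string& dbfile) {
        boost::shared_ptr<DatabaseAccessor> accessor(
            new SQLite3Database(dbfile, RRClass::IN()));
        DatabaseClient client(accessor);

        // findZone() returns a result code and, on success, a zone finder.
        const DataSourceClient::FindResult zone =
            client.findZone(Name("example.org"));
        if (zone.code != result::SUCCESS) {
            isc_throw(DataSourceError, "zone not found in " << dbfile);
        }

        // find() performs the lookup inside the chosen zone.
        return (zone.zone_finder->find(Name("www.example.org"), RRType::A(),
                                       NULL, ZoneFinder::FIND_DEFAULT));
    }
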
diff --git a/src/lib/datasrc/tests/sqlite3_accessor_unittest.cc b/src/lib/datasrc/tests/sqlite3_accessor_unittest.cc
new file mode 100644
index 0000000..097c821
--- /dev/null
+++ b/src/lib/datasrc/tests/sqlite3_accessor_unittest.cc
@@ -0,0 +1,245 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+#include <datasrc/sqlite3_accessor.h>
+
+#include <datasrc/data_source.h>
+
+#include <dns/rrclass.h>
+
+#include <gtest/gtest.h>
+#include <boost/scoped_ptr.hpp>
+
+using namespace isc::datasrc;
+using isc::data::ConstElementPtr;
+using isc::data::Element;
+using isc::dns::RRClass;
+using isc::dns::Name;
+
+namespace {
+// Some test data
+std::string SQLITE_DBFILE_EXAMPLE = TEST_DATA_DIR "/test.sqlite3";
+std::string SQLITE_DBFILE_EXAMPLE2 = TEST_DATA_DIR "/example2.com.sqlite3";
+std::string SQLITE_DBNAME_EXAMPLE2 = "sqlite3_example2.com.sqlite3";
+std::string SQLITE_DBFILE_EXAMPLE_ROOT = TEST_DATA_DIR "/test-root.sqlite3";
+std::string SQLITE_DBNAME_EXAMPLE_ROOT = "sqlite3_test-root.sqlite3";
+std::string SQLITE_DBFILE_BROKENDB = TEST_DATA_DIR "/brokendb.sqlite3";
+std::string SQLITE_DBFILE_MEMORY = ":memory:";
+
+// The following file must be non-existent and must be non-"creatable";
+// the sqlite3 library will try to create a new DB file if it doesn't exist,
+// so to test a failure case the create operation should also fail.
+// The "nodir", a non-existent directory, is inserted for this purpose.
+std::string SQLITE_DBFILE_NOTEXIST = TEST_DATA_DIR "/nodir/notexist";
+
+// Opening works (the content is tested in different tests)
+TEST(SQLite3Open, common) {
+ EXPECT_NO_THROW(SQLite3Database db(SQLITE_DBFILE_EXAMPLE,
+ RRClass::IN()));
+}
+
+// The file can't be opened
+TEST(SQLite3Open, notExist) {
+ EXPECT_THROW(SQLite3Database db(SQLITE_DBFILE_NOTEXIST,
+ RRClass::IN()), SQLite3Error);
+}
+
+// It rejects broken DB
+TEST(SQLite3Open, brokenDB) {
+ EXPECT_THROW(SQLite3Database db(SQLITE_DBFILE_BROKENDB,
+ RRClass::IN()), SQLite3Error);
+}
+
+// Test we can create the schema on the fly
+TEST(SQLite3Open, memoryDB) {
+ EXPECT_NO_THROW(SQLite3Database db(SQLITE_DBFILE_MEMORY,
+ RRClass::IN()));
+}
+
+// Test fixture for querying the db
+class SQLite3Access : public ::testing::Test {
+public:
+ SQLite3Access() {
+ initAccessor(SQLITE_DBFILE_EXAMPLE, RRClass::IN());
+ }
+ // So it can be re-created with different data
+ void initAccessor(const std::string& filename, const RRClass& rrclass) {
+ db.reset(new SQLite3Database(filename, rrclass));
+ }
+ // The tested database accessor
+ boost::scoped_ptr<SQLite3Database> db;
+};
+
+// This zone exists in the data, so it should be found
+TEST_F(SQLite3Access, getZone) {
+ std::pair<bool, int> result(db->getZone(Name("example.com")));
+ EXPECT_TRUE(result.first);
+ EXPECT_EQ(1, result.second);
+}
+
+// But it should find only the zone, nothing below it
+TEST_F(SQLite3Access, subZone) {
+ EXPECT_FALSE(db->getZone(Name("sub.example.com")).first);
+}
+
+// This zone is not there at all
+TEST_F(SQLite3Access, noZone) {
+ EXPECT_FALSE(db->getZone(Name("example.org")).first);
+}
+
+// This zone is there, but in a different class
+TEST_F(SQLite3Access, noClass) {
+ initAccessor(SQLITE_DBFILE_EXAMPLE, RRClass::CH());
+ EXPECT_FALSE(db->getZone(Name("example.com")).first);
+}
+
+TEST(SQLite3Open, getDBNameExample2) {
+ SQLite3Database db(SQLITE_DBFILE_EXAMPLE2, RRClass::IN());
+ EXPECT_EQ(SQLITE_DBNAME_EXAMPLE2, db.getDBName());
+}
+
+TEST(SQLite3Open, getDBNameExampleROOT) {
+ SQLite3Database db(SQLITE_DBFILE_EXAMPLE_ROOT, RRClass::IN());
+ EXPECT_EQ(SQLITE_DBNAME_EXAMPLE_ROOT, db.getDBName());
+}
+
+// Simple helper function to check that a returned record row
+// matches the expected field values
+void
+checkRecordRow(const std::string columns[],
+ const std::string& field0,
+ const std::string& field1,
+ const std::string& field2,
+ const std::string& field3)
+{
+ EXPECT_EQ(field0, columns[0]);
+ EXPECT_EQ(field1, columns[1]);
+ EXPECT_EQ(field2, columns[2]);
+ EXPECT_EQ(field3, columns[3]);
+}
+
+TEST_F(SQLite3Access, getRecords) {
+ const std::pair<bool, int> zone_info(db->getZone(Name("example.com")));
+ ASSERT_TRUE(zone_info.first);
+
+ const int zone_id = zone_info.second;
+ ASSERT_EQ(1, zone_id);
+
+ const size_t column_count = DatabaseAccessor::COLUMN_COUNT;
+ std::string columns[column_count];
+
+ // without search, getNext() should return false
+ EXPECT_FALSE(db->getNextRecord(columns, column_count));
+ checkRecordRow(columns, "", "", "", "");
+
+ db->searchForRecords(zone_id, "foo.bar.");
+ EXPECT_FALSE(db->getNextRecord(columns, column_count));
+ checkRecordRow(columns, "", "", "", "");
+
+ db->searchForRecords(zone_id, "");
+ EXPECT_FALSE(db->getNextRecord(columns, column_count));
+ checkRecordRow(columns, "", "", "", "");
+
+ // Should error on a bad number of columns
+ EXPECT_THROW(db->getNextRecord(columns, 3), DataSourceError);
+ EXPECT_THROW(db->getNextRecord(columns, 5), DataSourceError);
+
+ // now try some real searches
+ db->searchForRecords(zone_id, "foo.example.com.");
+ ASSERT_TRUE(db->getNextRecord(columns, column_count));
+ checkRecordRow(columns, "CNAME", "3600", "",
+ "cnametest.example.org.");
+ ASSERT_TRUE(db->getNextRecord(columns, column_count));
+ checkRecordRow(columns, "RRSIG", "3600", "CNAME",
+ "CNAME 5 3 3600 20100322084538 20100220084538 33495 "
+ "example.com. FAKEFAKEFAKEFAKE");
+ ASSERT_TRUE(db->getNextRecord(columns, column_count));
+ checkRecordRow(columns, "NSEC", "7200", "",
+ "mail.example.com. CNAME RRSIG NSEC");
+ ASSERT_TRUE(db->getNextRecord(columns, column_count));
+ checkRecordRow(columns, "RRSIG", "7200", "NSEC",
+ "NSEC 5 3 7200 20100322084538 20100220084538 33495 "
+ "example.com. FAKEFAKEFAKEFAKE");
+ EXPECT_FALSE(db->getNextRecord(columns, column_count));
+ // with no more records, the array should not have been modified
+ checkRecordRow(columns, "RRSIG", "7200", "NSEC",
+ "NSEC 5 3 7200 20100322084538 20100220084538 33495 "
+ "example.com. FAKEFAKEFAKEFAKE");
+
+ db->searchForRecords(zone_id, "example.com.");
+ ASSERT_TRUE(db->getNextRecord(columns, column_count));
+ checkRecordRow(columns, "SOA", "3600", "",
+ "master.example.com. admin.example.com. "
+ "1234 3600 1800 2419200 7200");
+ ASSERT_TRUE(db->getNextRecord(columns, column_count));
+ checkRecordRow(columns, "RRSIG", "3600", "SOA",
+ "SOA 5 2 3600 20100322084538 20100220084538 "
+ "33495 example.com. FAKEFAKEFAKEFAKE");
+ ASSERT_TRUE(db->getNextRecord(columns, column_count));
+ checkRecordRow(columns, "NS", "1200", "", "dns01.example.com.");
+ ASSERT_TRUE(db->getNextRecord(columns, column_count));
+ checkRecordRow(columns, "NS", "3600", "", "dns02.example.com.");
+ ASSERT_TRUE(db->getNextRecord(columns, column_count));
+ checkRecordRow(columns, "NS", "1800", "", "dns03.example.com.");
+ ASSERT_TRUE(db->getNextRecord(columns, column_count));
+ checkRecordRow(columns, "RRSIG", "3600", "NS",
+ "NS 5 2 3600 20100322084538 20100220084538 "
+ "33495 example.com. FAKEFAKEFAKEFAKE");
+ ASSERT_TRUE(db->getNextRecord(columns, column_count));
+ checkRecordRow(columns, "MX", "3600", "", "10 mail.example.com.");
+ ASSERT_TRUE(db->getNextRecord(columns, column_count));
+ checkRecordRow(columns, "MX", "3600", "",
+ "20 mail.subzone.example.com.");
+ ASSERT_TRUE(db->getNextRecord(columns, column_count));
+ checkRecordRow(columns, "RRSIG", "3600", "MX",
+ "MX 5 2 3600 20100322084538 20100220084538 "
+ "33495 example.com. FAKEFAKEFAKEFAKE");
+ ASSERT_TRUE(db->getNextRecord(columns, column_count));
+ checkRecordRow(columns, "NSEC", "7200", "",
+ "cname-ext.example.com. NS SOA MX RRSIG NSEC DNSKEY");
+ ASSERT_TRUE(db->getNextRecord(columns, column_count));
+ checkRecordRow(columns, "RRSIG", "7200", "NSEC",
+ "NSEC 5 2 7200 20100322084538 20100220084538 "
+ "33495 example.com. FAKEFAKEFAKEFAKE");
+ ASSERT_TRUE(db->getNextRecord(columns, column_count));
+ checkRecordRow(columns, "DNSKEY", "3600", "",
+ "256 3 5 AwEAAcOUBllYc1hf7ND9uDy+Yz1BF3sI0m4q NGV7W"
+ "cTD0WEiuV7IjXgHE36fCmS9QsUxSSOV o1I/FMxI2PJVqTYHkX"
+ "FBS7AzLGsQYMU7UjBZ SotBJ6Imt5pXMu+lEDNy8TOUzG3xm7g"
+ "0qcbW YF6qCEfvZoBtAqi5Rk7Mlrqs8agxYyMx");
+ ASSERT_TRUE(db->getNextRecord(columns, column_count));
+ checkRecordRow(columns, "DNSKEY", "3600", "",
+ "257 3 5 AwEAAe5WFbxdCPq2jZrZhlMj7oJdff3W7syJ tbvzg"
+ "62tRx0gkoCDoBI9DPjlOQG0UAbj+xUV 4HQZJStJaZ+fHU5AwV"
+ "NT+bBZdtV+NujSikhd THb4FYLg2b3Cx9NyJvAVukHp/91HnWu"
+ "G4T36 CzAFrfPwsHIrBz9BsaIQ21VRkcmj7DswfI/i DGd8j6b"
+ "qiODyNZYQ+ZrLmF0KIJ2yPN3iO6Zq 23TaOrVTjB7d1a/h31OD"
+ "fiHAxFHrkY3t3D5J R9Nsl/7fdRmSznwtcSDgLXBoFEYmw6p86"
+ "Acv RyoYNcL1SXjaKVLG5jyU3UR+LcGZT5t/0xGf oIK/aKwEN"
+ "rsjcKZZj660b1M=");
+ ASSERT_TRUE(db->getNextRecord(columns, column_count));
+ checkRecordRow(columns, "RRSIG", "3600", "DNSKEY",
+ "DNSKEY 5 2 3600 20100322084538 20100220084538 "
+ "4456 example.com. FAKEFAKEFAKEFAKE");
+ ASSERT_TRUE(db->getNextRecord(columns, column_count));
+ checkRecordRow(columns, "RRSIG", "3600", "DNSKEY",
+ "DNSKEY 5 2 3600 20100322084538 20100220084538 "
+ "33495 example.com. FAKEFAKEFAKEFAKE");
+ EXPECT_FALSE(db->getNextRecord(columns, column_count));
+ // getNextRecord() returning false should mean the array is not altered
+ checkRecordRow(columns, "RRSIG", "3600", "DNSKEY",
+ "DNSKEY 5 2 3600 20100322084538 20100220084538 "
+ "33495 example.com. FAKEFAKEFAKEFAKE");
+}
+
+} // end anonymous namespace
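The tests above pin down the accessor's search contract: getZone() returns a (found, zone id) pair, searchForRecords() positions the accessor on a name within that zone, and getNextRecord() fills a fixed-size column array until it returns false without touching the array. A minimal sketch of how a caller might drive that loop follows; it reuses the SQLite3Database, DatabaseAccessor::COLUMN_COUNT and Name types from this branch and is illustrative only, not part of the patch.

    // Illustrative sketch (not part of the patch): iterate all records
    // stored for one name, using the accessor API exercised above.
    // Assumes the sqlite3 accessor and dns Name headers from this branch.
    #include <iostream>
    #include <string>
    #include <utility>

    void
    dumpRecords(SQLite3Database& db, const isc::dns::Name& zone_name,
                const std::string& record_name)
    {
        const std::pair<bool, int> zone(db.getZone(zone_name));
        if (!zone.first) {
            return;             // zone not in this database
        }
        std::string columns[DatabaseAccessor::COLUMN_COUNT];
        db.searchForRecords(zone.second, record_name);
        while (db.getNextRecord(columns, DatabaseAccessor::COLUMN_COUNT)) {
            // columns[0]=type, [1]=TTL, [2]=sigtype, [3]=RDATA, as
            // checked by checkRecordRow() in the tests above
            std::cout << columns[0] << " " << columns[1] << " "
                      << columns[3] << std::endl;
        }
    }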
diff --git a/src/lib/datasrc/zone.h b/src/lib/datasrc/zone.h
index 69785f0..0dacc5d 100644
--- a/src/lib/datasrc/zone.h
+++ b/src/lib/datasrc/zone.h
@@ -131,10 +131,10 @@ public:
/// These methods should never throw an exception.
//@{
/// Return the origin name of the zone.
- virtual const isc::dns::Name& getOrigin() const = 0;
+ virtual isc::dns::Name getOrigin() const = 0;
/// Return the RR class of the zone.
- virtual const isc::dns::RRClass& getClass() const = 0;
+ virtual isc::dns::RRClass getClass() const = 0;
//@}
///
@@ -197,7 +197,7 @@ public:
const isc::dns::RRType& type,
isc::dns::RRsetList* target = NULL,
const FindOptions options
- = FIND_DEFAULT) const = 0;
+ = FIND_DEFAULT) = 0;
//@}
};
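The zone.h change above switches getOrigin() and getClass() to return by value and drops the const qualifier from find(). A short sketch of why, under the assumption (not stated in the patch itself) that the target is a database-backed finder: such an implementation builds the origin Name on demand rather than holding it as a member, so it cannot hand out a reference, and its find() drives a stateful searchForRecords()/getNextRecord() sequence on the accessor.

    // Hypothetical illustration only; not the class added by this branch.
    #include <dns/name.h>
    #include <dns/rrclass.h>

    class HypotheticalDbFinder {
    public:
        isc::dns::Name getOrigin() const {
            // constructed on the fly, so it must be returned by value
            return (isc::dns::Name("example.com"));
        }
        isc::dns::RRClass getClass() const {
            return (isc::dns::RRClass::IN());
        }
    };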
diff --git a/src/lib/dns/Makefile.am b/src/lib/dns/Makefile.am
index 4a0173c..43737a9 100644
--- a/src/lib/dns/Makefile.am
+++ b/src/lib/dns/Makefile.am
@@ -51,6 +51,8 @@ EXTRA_DIST += rdata/generic/soa_6.cc
EXTRA_DIST += rdata/generic/soa_6.h
EXTRA_DIST += rdata/generic/txt_16.cc
EXTRA_DIST += rdata/generic/txt_16.h
+EXTRA_DIST += rdata/generic/afsdb_18.cc
+EXTRA_DIST += rdata/generic/afsdb_18.h
EXTRA_DIST += rdata/hs_4/a_1.cc
EXTRA_DIST += rdata/hs_4/a_1.h
EXTRA_DIST += rdata/in_1/a_1.cc
diff --git a/src/lib/dns/rdata/generic/afsdb_18.cc b/src/lib/dns/rdata/generic/afsdb_18.cc
new file mode 100644
index 0000000..dd7fa5f
--- /dev/null
+++ b/src/lib/dns/rdata/generic/afsdb_18.cc
@@ -0,0 +1,170 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <string>
+#include <sstream>
+
+#include <util/buffer.h>
+#include <util/strutil.h>
+
+#include <dns/name.h>
+#include <dns/messagerenderer.h>
+#include <dns/rdata.h>
+#include <dns/rdataclass.h>
+
+#include <boost/lexical_cast.hpp>
+
+using namespace std;
+using namespace isc::util::str;
+
+// BEGIN_ISC_NAMESPACE
+// BEGIN_RDATA_NAMESPACE
+
+/// \brief Constructor from string.
+///
+/// \c afsdb_str must be formatted as follows:
+/// \code <subtype> <server name>
+/// \endcode
+/// where the server name field must represent a valid domain name.
+///
+/// An example of valid string is:
+/// \code "1 server.example.com." \endcode
+///
+/// <b>Exceptions</b>
+///
+/// \exception InvalidRdataText The number of RDATA fields (must be 2) is
+/// incorrect.
+/// \exception std::bad_alloc Memory allocation fails.
+/// \exception Other The constructor of the \c Name class will throw if the
+/// name in the string is invalid.
+AFSDB::AFSDB(const std::string& afsdb_str) :
+ subtype_(0), server_(Name::ROOT_NAME())
+{
+ istringstream iss(afsdb_str);
+
+ try {
+ const uint32_t subtype = tokenToNum<int32_t, 16>(getToken(iss));
+ const Name servername(getToken(iss));
+ string server;
+
+ if (!iss.eof()) {
+ isc_throw(InvalidRdataText, "Unexpected input for AFSDB"
+ "RDATA: " << afsdb_str);
+ }
+
+ subtype_ = subtype;
+ server_ = servername;
+
+ } catch (const StringTokenError& ste) {
+ isc_throw(InvalidRdataText, "Invalid AFSDB text: " <<
+ ste.what() << ": " << afsdb_str);
+ }
+}
+
+/// \brief Constructor from wire-format data.
+///
+/// This constructor doesn't validate the second parameter (the RDATA
+/// length); if necessary, the caller must check consistency.
+///
+/// \exception std::bad_alloc Memory allocation fails.
+/// \exception Other The constructor of the \c Name class will throw if the
+/// name in the wire data is invalid.
+AFSDB::AFSDB(InputBuffer& buffer, size_t) :
+ subtype_(buffer.readUint16()), server_(buffer)
+{}
+
+/// \brief Copy constructor.
+///
+/// \exception std::bad_alloc Memory allocation fails in copying internal
+/// member variables (this should be very rare).
+AFSDB::AFSDB(const AFSDB& other) :
+ Rdata(), subtype_(other.subtype_), server_(other.server_)
+{}
+
+AFSDB&
+AFSDB::operator=(const AFSDB& source) {
+ subtype_ = source.subtype_;
+ server_ = source.server_;
+
+ return (*this);
+}
+
+/// \brief Convert the \c AFSDB to a string.
+///
+/// The output of this method is formatted as described in the "from string"
+/// constructor (\c AFSDB(const std::string&)).
+///
+/// \exception std::bad_alloc Internal resource allocation fails.
+///
+/// \return A \c string object that represents the \c AFSDB object.
+string
+AFSDB::toText() const {
+ return (boost::lexical_cast<string>(subtype_) + " " + server_.toText());
+}
+
+/// \brief Render the \c AFSDB in the wire format without name compression.
+///
+/// \exception std::bad_alloc Internal resource allocation fails.
+///
+/// \param buffer An output buffer to store the wire data.
+void
+AFSDB::toWire(OutputBuffer& buffer) const {
+ buffer.writeUint16(subtype_);
+ server_.toWire(buffer);
+}
+
+/// \brief Render the \c AFSDB in the wire format, taking into account
+/// compression.
+///
+/// As specified in RFC3597, TYPE AFSDB is not "well-known", so the server
+/// field (domain name) will not be compressed.
+///
+/// \exception std::bad_alloc Internal resource allocation fails.
+///
+/// \param renderer DNS message rendering context that encapsulates the
+/// output buffer and name compression information.
+void
+AFSDB::toWire(AbstractMessageRenderer& renderer) const {
+ renderer.writeUint16(subtype_);
+ renderer.writeName(server_, false);
+}
+
+/// \brief Compare two instances of \c AFSDB RDATA.
+///
+/// See documentation in \c Rdata.
+int
+AFSDB::compare(const Rdata& other) const {
+ const AFSDB& other_afsdb = dynamic_cast<const AFSDB&>(other);
+ if (subtype_ < other_afsdb.subtype_) {
+ return (-1);
+ } else if (subtype_ > other_afsdb.subtype_) {
+ return (1);
+ }
+
+ return (compareNames(server_, other_afsdb.server_));
+}
+
+const Name&
+AFSDB::getServer() const {
+ return (server_);
+}
+
+uint16_t
+AFSDB::getSubtype() const {
+ return (subtype_);
+}
+
+// END_RDATA_NAMESPACE
+// END_ISC_NAMESPACE
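A brief usage sketch for the new AFSDB RDATA class, based only on the constructors and accessors defined above (illustrative, not part of the patch):

    #include <dns/rdataclass.h>
    #include <iostream>

    int main() {
        using isc::dns::rdata::generic::AFSDB;

        const AFSDB afsdb("1 afsdb.example.com.");
        std::cout << afsdb.getSubtype() << std::endl;          // 1
        std::cout << afsdb.getServer().toText() << std::endl;  // afsdb.example.com.
        std::cout << afsdb.toText() << std::endl;              // 1 afsdb.example.com.
        return (0);
    }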
diff --git a/src/lib/dns/rdata/generic/afsdb_18.h b/src/lib/dns/rdata/generic/afsdb_18.h
new file mode 100644
index 0000000..4a46775
--- /dev/null
+++ b/src/lib/dns/rdata/generic/afsdb_18.h
@@ -0,0 +1,74 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+// BEGIN_HEADER_GUARD
+
+#include <stdint.h>
+
+#include <string>
+
+#include <dns/name.h>
+#include <dns/rdata.h>
+
+// BEGIN_ISC_NAMESPACE
+
+// BEGIN_COMMON_DECLARATIONS
+// END_COMMON_DECLARATIONS
+
+// BEGIN_RDATA_NAMESPACE
+
+/// \brief \c rdata::AFSDB class represents the AFSDB RDATA as defined %in
+/// RFC1183.
+///
+/// This class implements the basic interfaces inherited from the abstract
+/// \c rdata::Rdata class, and provides trivial accessors specific to the
+/// AFSDB RDATA.
+class AFSDB : public Rdata {
+public:
+ // BEGIN_COMMON_MEMBERS
+ // END_COMMON_MEMBERS
+
+ /// \brief Assignment operator.
+ ///
+ /// This method never throws an exception.
+ AFSDB& operator=(const AFSDB& source);
+ ///
+ /// Specialized methods
+ ///
+
+ /// \brief Return the value of the server field.
+ ///
+ /// \return A reference to a \c Name class object corresponding to the
+ /// internal server name.
+ ///
+ /// This method never throws an exception.
+ const Name& getServer() const;
+
+ /// \brief Return the value of the subtype field.
+ ///
+ /// This method never throws an exception.
+ uint16_t getSubtype() const;
+
+private:
+ uint16_t subtype_;
+ Name server_;
+};
+
+// END_RDATA_NAMESPACE
+// END_ISC_NAMESPACE
+// END_HEADER_GUARD
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/dns/rdata/generic/rrsig_46.cc b/src/lib/dns/rdata/generic/rrsig_46.cc
index 0c82406..fc8e340 100644
--- a/src/lib/dns/rdata/generic/rrsig_46.cc
+++ b/src/lib/dns/rdata/generic/rrsig_46.cc
@@ -243,5 +243,10 @@ RRSIG::compare(const Rdata& other) const {
}
}
+const RRType&
+RRSIG::typeCovered() {
+ return (impl_->covered_);
+}
+
// END_RDATA_NAMESPACE
// END_ISC_NAMESPACE
diff --git a/src/lib/dns/rdata/generic/rrsig_46.h b/src/lib/dns/rdata/generic/rrsig_46.h
index 19acc40..b8e6306 100644
--- a/src/lib/dns/rdata/generic/rrsig_46.h
+++ b/src/lib/dns/rdata/generic/rrsig_46.h
@@ -38,6 +38,9 @@ public:
// END_COMMON_MEMBERS
RRSIG& operator=(const RRSIG& source);
~RRSIG();
+
+ // specialized methods
+ const RRType& typeCovered();
private:
RRSIGImpl* impl_;
};
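The new typeCovered() accessor exposes the type-covered field of an RRSIG. One expected use (an assumption on my part, not spelled out in the patch) is letting a data source pick out the signatures that cover a particular RRset type, roughly:

    // Sketch only; note typeCovered() is not declared const above, so the
    // RRSIG argument cannot be const either.
    #include <dns/rdataclass.h>
    #include <dns/rrtype.h>

    bool
    covers(isc::dns::rdata::generic::RRSIG& sig, const isc::dns::RRType& type) {
        return (sig.typeCovered() == type);
    }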
diff --git a/src/lib/dns/tests/Makefile.am b/src/lib/dns/tests/Makefile.am
index 667c8a8..37dd7fa 100644
--- a/src/lib/dns/tests/Makefile.am
+++ b/src/lib/dns/tests/Makefile.am
@@ -32,6 +32,7 @@ run_unittests_SOURCES += rdata_ns_unittest.cc rdata_soa_unittest.cc
run_unittests_SOURCES += rdata_txt_unittest.cc rdata_mx_unittest.cc
run_unittests_SOURCES += rdata_ptr_unittest.cc rdata_cname_unittest.cc
run_unittests_SOURCES += rdata_dname_unittest.cc
+run_unittests_SOURCES += rdata_afsdb_unittest.cc
run_unittests_SOURCES += rdata_opt_unittest.cc
run_unittests_SOURCES += rdata_dnskey_unittest.cc
run_unittests_SOURCES += rdata_ds_unittest.cc
diff --git a/src/lib/dns/tests/rdata_afsdb_unittest.cc b/src/lib/dns/tests/rdata_afsdb_unittest.cc
new file mode 100644
index 0000000..7df8d83
--- /dev/null
+++ b/src/lib/dns/tests/rdata_afsdb_unittest.cc
@@ -0,0 +1,210 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <util/buffer.h>
+#include <dns/exceptions.h>
+#include <dns/messagerenderer.h>
+#include <dns/rdata.h>
+#include <dns/rdataclass.h>
+#include <dns/rrclass.h>
+#include <dns/rrtype.h>
+
+#include <gtest/gtest.h>
+
+#include <dns/tests/unittest_util.h>
+#include <dns/tests/rdata_unittest.h>
+
+using isc::UnitTestUtil;
+using namespace std;
+using namespace isc::dns;
+using namespace isc::util;
+using namespace isc::dns::rdata;
+
+const char* const afsdb_text = "1 afsdb.example.com.";
+const char* const afsdb_text2 = "0 root.example.com.";
+const char* const too_long_label("012345678901234567890123456789"
+ "0123456789012345678901234567890123");
+
+namespace {
+class Rdata_AFSDB_Test : public RdataTest {
+protected:
+ Rdata_AFSDB_Test() :
+ rdata_afsdb(string(afsdb_text)), rdata_afsdb2(string(afsdb_text2))
+ {}
+
+ const generic::AFSDB rdata_afsdb;
+ const generic::AFSDB rdata_afsdb2;
+ vector<uint8_t> expected_wire;
+};
+
+
+TEST_F(Rdata_AFSDB_Test, createFromText) {
+ EXPECT_EQ(1, rdata_afsdb.getSubtype());
+ EXPECT_EQ(Name("afsdb.example.com."), rdata_afsdb.getServer());
+
+ EXPECT_EQ(0, rdata_afsdb2.getSubtype());
+ EXPECT_EQ(Name("root.example.com."), rdata_afsdb2.getServer());
+}
+
+TEST_F(Rdata_AFSDB_Test, badText) {
+ // subtype is too large
+ EXPECT_THROW(const generic::AFSDB rdata_afsdb("99999999 afsdb.example.com."),
+ InvalidRdataText);
+ // incomplete text
+ EXPECT_THROW(const generic::AFSDB rdata_afsdb("10"), InvalidRdataText);
+ EXPECT_THROW(const generic::AFSDB rdata_afsdb("SPOON"), InvalidRdataText);
+ EXPECT_THROW(const generic::AFSDB rdata_afsdb("1root.example.com."), InvalidRdataText);
+ // number of fields (must be 2) is incorrect
+ EXPECT_THROW(const generic::AFSDB rdata_afsdb("10 afsdb. example.com."),
+ InvalidRdataText);
+ // bad name
+ EXPECT_THROW(const generic::AFSDB rdata_afsdb("1 afsdb.example.com." +
+ string(too_long_label)), TooLongLabel);
+}
+
+TEST_F(Rdata_AFSDB_Test, assignment) {
+ generic::AFSDB copy((string(afsdb_text2)));
+ copy = rdata_afsdb;
+ EXPECT_EQ(0, copy.compare(rdata_afsdb));
+
+ // Check if the copied data is valid even after the original is deleted
+ generic::AFSDB* copy2 = new generic::AFSDB(rdata_afsdb);
+ generic::AFSDB copy3((string(afsdb_text2)));
+ copy3 = *copy2;
+ delete copy2;
+ EXPECT_EQ(0, copy3.compare(rdata_afsdb));
+
+ // Self assignment
+ copy = copy;
+ EXPECT_EQ(0, copy.compare(rdata_afsdb));
+}
+
+TEST_F(Rdata_AFSDB_Test, createFromWire) {
+ // uncompressed names
+ EXPECT_EQ(0, rdata_afsdb.compare(
+ *rdataFactoryFromFile(RRType::AFSDB(), RRClass::IN(),
+ "rdata_afsdb_fromWire1.wire")));
+ // compressed name
+ EXPECT_EQ(0, rdata_afsdb.compare(
+ *rdataFactoryFromFile(RRType::AFSDB(), RRClass::IN(),
+ "rdata_afsdb_fromWire2.wire", 13)));
+ // RDLENGTH is too short
+ EXPECT_THROW(rdataFactoryFromFile(RRType::AFSDB(), RRClass::IN(),
+ "rdata_afsdb_fromWire3.wire"),
+ InvalidRdataLength);
+ // RDLENGTH is too long
+ EXPECT_THROW(rdataFactoryFromFile(RRType::AFSDB(), RRClass::IN(),
+ "rdata_afsdb_fromWire4.wire"),
+ InvalidRdataLength);
+ // bogus server name, the error should be detected in the name
+ // constructor
+ EXPECT_THROW(rdataFactoryFromFile(RRType::AFSDB(), RRClass::IN(),
+ "rdata_afsdb_fromWire5.wire"),
+ DNSMessageFORMERR);
+}
+
+TEST_F(Rdata_AFSDB_Test, toWireBuffer) {
+ // construct actual data
+ rdata_afsdb.toWire(obuffer);
+
+ // construct expected data
+ UnitTestUtil::readWireData("rdata_afsdb_toWire1.wire", expected_wire);
+
+ // then compare them
+ EXPECT_PRED_FORMAT4(UnitTestUtil::matchWireData,
+ obuffer.getData(), obuffer.getLength(),
+ &expected_wire[0], expected_wire.size());
+
+ // clear buffer for the next test
+ obuffer.clear();
+
+ // construct actual data
+ Name("example.com.").toWire(obuffer);
+ rdata_afsdb2.toWire(obuffer);
+
+ // construct expected data
+ UnitTestUtil::readWireData("rdata_afsdb_toWire2.wire", expected_wire);
+
+ // then compare them
+ EXPECT_PRED_FORMAT4(UnitTestUtil::matchWireData,
+ obuffer.getData(), obuffer.getLength(),
+ &expected_wire[0], expected_wire.size());
+}
+
+TEST_F(Rdata_AFSDB_Test, toWireRenderer) {
+ // similar to toWireBuffer, but names in RDATA could be compressed due to
+ // preceding names. Actually they must not be compressed according to
+ // RFC3597, and this test checks that.
+
+ // construct actual data
+ rdata_afsdb.toWire(renderer);
+
+ // construct expected data
+ UnitTestUtil::readWireData("rdata_afsdb_toWire1.wire", expected_wire);
+
+ // then compare them
+ EXPECT_PRED_FORMAT4(UnitTestUtil::matchWireData,
+ renderer.getData(), renderer.getLength(),
+ &expected_wire[0], expected_wire.size());
+
+ // clear renderer for the next test
+ renderer.clear();
+
+ // construct actual data
+ Name("example.com.").toWire(obuffer);
+ rdata_afsdb2.toWire(renderer);
+
+ // construct expected data
+ UnitTestUtil::readWireData("rdata_afsdb_toWire2.wire", expected_wire);
+
+ // then compare them
+ EXPECT_PRED_FORMAT4(UnitTestUtil::matchWireData,
+ renderer.getData(), renderer.getLength(),
+ &expected_wire[0], expected_wire.size());
+}
+
+TEST_F(Rdata_AFSDB_Test, toText) {
+ EXPECT_EQ(afsdb_text, rdata_afsdb.toText());
+ EXPECT_EQ(afsdb_text2, rdata_afsdb2.toText());
+}
+
+TEST_F(Rdata_AFSDB_Test, compare) {
+ // check reflexivity
+ EXPECT_EQ(0, rdata_afsdb.compare(rdata_afsdb));
+
+ // the name must be compared in a case-insensitive manner
+ EXPECT_EQ(0, rdata_afsdb.compare(generic::AFSDB("1 "
+ "AFSDB.example.com.")));
+
+ const generic::AFSDB small1("10 afsdb.example.com");
+ const generic::AFSDB large1("65535 afsdb.example.com");
+ const generic::AFSDB large2("256 afsdb.example.com");
+
+ // confirm these are compared as unsigned values
+ EXPECT_GT(0, rdata_afsdb.compare(large1));
+ EXPECT_LT(0, large1.compare(rdata_afsdb));
+
+ // confirm these are compared in network byte order
+ EXPECT_GT(0, small1.compare(large2));
+ EXPECT_LT(0, large2.compare(small1));
+
+ // another AFSDB whose server name is larger than that of rdata_afsdb.
+ const generic::AFSDB large3("256 zzzzz.example.com");
+ EXPECT_GT(0, large2.compare(large3));
+ EXPECT_LT(0, large3.compare(large2));
+
+ // comparison attempt between incompatible RR types should be rejected
+ EXPECT_THROW(rdata_afsdb.compare(*rdata_nomatch), bad_cast);
+}
+}
diff --git a/src/lib/dns/tests/rdata_rrsig_unittest.cc b/src/lib/dns/tests/rdata_rrsig_unittest.cc
index 903021f..3324b99 100644
--- a/src/lib/dns/tests/rdata_rrsig_unittest.cc
+++ b/src/lib/dns/tests/rdata_rrsig_unittest.cc
@@ -47,7 +47,7 @@ TEST_F(Rdata_RRSIG_Test, fromText) {
"f49t+sXKPzbipN9g+s1ZPiIyofc=");
generic::RRSIG rdata_rrsig(rrsig_txt);
EXPECT_EQ(rrsig_txt, rdata_rrsig.toText());
-
+ EXPECT_EQ(isc::dns::RRType::A(), rdata_rrsig.typeCovered());
}
TEST_F(Rdata_RRSIG_Test, badText) {
diff --git a/src/lib/dns/tests/testdata/Makefile.am b/src/lib/dns/tests/testdata/Makefile.am
index 743b5d2..d93470e 100644
--- a/src/lib/dns/tests/testdata/Makefile.am
+++ b/src/lib/dns/tests/testdata/Makefile.am
@@ -30,6 +30,10 @@ BUILT_SOURCES += rdata_rp_fromWire1.wire rdata_rp_fromWire2.wire
BUILT_SOURCES += rdata_rp_fromWire3.wire rdata_rp_fromWire4.wire
BUILT_SOURCES += rdata_rp_fromWire5.wire rdata_rp_fromWire6.wire
BUILT_SOURCES += rdata_rp_toWire1.wire rdata_rp_toWire2.wire
+BUILT_SOURCES += rdata_afsdb_fromWire1.wire rdata_afsdb_fromWire2.wire
+BUILT_SOURCES += rdata_afsdb_fromWire3.wire rdata_afsdb_fromWire4.wire
+BUILT_SOURCES += rdata_afsdb_fromWire5.wire
+BUILT_SOURCES += rdata_afsdb_toWire1.wire rdata_afsdb_toWire2.wire
BUILT_SOURCES += rdata_soa_toWireUncompressed.wire
BUILT_SOURCES += rdata_txt_fromWire2.wire rdata_txt_fromWire3.wire
BUILT_SOURCES += rdata_txt_fromWire4.wire rdata_txt_fromWire5.wire
@@ -99,6 +103,10 @@ EXTRA_DIST += rdata_rp_fromWire1.spec rdata_rp_fromWire2.spec
EXTRA_DIST += rdata_rp_fromWire3.spec rdata_rp_fromWire4.spec
EXTRA_DIST += rdata_rp_fromWire5.spec rdata_rp_fromWire6.spec
EXTRA_DIST += rdata_rp_toWire1.spec rdata_rp_toWire2.spec
+EXTRA_DIST += rdata_afsdb_fromWire1.spec rdata_afsdb_fromWire2.spec
+EXTRA_DIST += rdata_afsdb_fromWire3.spec rdata_afsdb_fromWire4.spec
+EXTRA_DIST += rdata_afsdb_fromWire5.spec
+EXTRA_DIST += rdata_afsdb_toWire1.spec rdata_afsdb_toWire2.spec
EXTRA_DIST += rdata_soa_fromWire rdata_soa_toWireUncompressed.spec
EXTRA_DIST += rdata_srv_fromWire
EXTRA_DIST += rdata_txt_fromWire1 rdata_txt_fromWire2.spec
diff --git a/src/lib/dns/tests/testdata/rdata_afsdb_fromWire1.spec b/src/lib/dns/tests/testdata/rdata_afsdb_fromWire1.spec
new file mode 100644
index 0000000..f831313
--- /dev/null
+++ b/src/lib/dns/tests/testdata/rdata_afsdb_fromWire1.spec
@@ -0,0 +1,3 @@
+[custom]
+sections: afsdb
+[afsdb]
diff --git a/src/lib/dns/tests/testdata/rdata_afsdb_fromWire2.spec b/src/lib/dns/tests/testdata/rdata_afsdb_fromWire2.spec
new file mode 100644
index 0000000..f33e768
--- /dev/null
+++ b/src/lib/dns/tests/testdata/rdata_afsdb_fromWire2.spec
@@ -0,0 +1,6 @@
+[custom]
+sections: name:afsdb
+[name]
+name: example.com
+[afsdb]
+server: afsdb.ptr=0
diff --git a/src/lib/dns/tests/testdata/rdata_afsdb_fromWire3.spec b/src/lib/dns/tests/testdata/rdata_afsdb_fromWire3.spec
new file mode 100644
index 0000000..993032f
--- /dev/null
+++ b/src/lib/dns/tests/testdata/rdata_afsdb_fromWire3.spec
@@ -0,0 +1,4 @@
+[custom]
+sections: afsdb
+[afsdb]
+rdlen: 3
diff --git a/src/lib/dns/tests/testdata/rdata_afsdb_fromWire4.spec b/src/lib/dns/tests/testdata/rdata_afsdb_fromWire4.spec
new file mode 100644
index 0000000..37abf13
--- /dev/null
+++ b/src/lib/dns/tests/testdata/rdata_afsdb_fromWire4.spec
@@ -0,0 +1,4 @@
+[custom]
+sections: afsdb
+[afsdb]
+rdlen: 80
diff --git a/src/lib/dns/tests/testdata/rdata_afsdb_fromWire5.spec b/src/lib/dns/tests/testdata/rdata_afsdb_fromWire5.spec
new file mode 100644
index 0000000..0ea79dd
--- /dev/null
+++ b/src/lib/dns/tests/testdata/rdata_afsdb_fromWire5.spec
@@ -0,0 +1,4 @@
+[custom]
+sections: afsdb
+[afsdb]
+server: "01234567890123456789012345678901234567890123456789012345678901234"
diff --git a/src/lib/dns/tests/testdata/rdata_afsdb_toWire1.spec b/src/lib/dns/tests/testdata/rdata_afsdb_toWire1.spec
new file mode 100644
index 0000000..1946458
--- /dev/null
+++ b/src/lib/dns/tests/testdata/rdata_afsdb_toWire1.spec
@@ -0,0 +1,4 @@
+[custom]
+sections: afsdb
+[afsdb]
+rdlen: -1
diff --git a/src/lib/dns/tests/testdata/rdata_afsdb_toWire2.spec b/src/lib/dns/tests/testdata/rdata_afsdb_toWire2.spec
new file mode 100644
index 0000000..c80011a
--- /dev/null
+++ b/src/lib/dns/tests/testdata/rdata_afsdb_toWire2.spec
@@ -0,0 +1,8 @@
+[custom]
+sections: name:afsdb
+[name]
+name: example.com.
+[afsdb]
+subtype: 0
+server: root.example.com
+rdlen: -1
diff --git a/src/lib/python/isc/config/ccsession.py b/src/lib/python/isc/config/ccsession.py
index 4fa9d58..ba7724c 100644
--- a/src/lib/python/isc/config/ccsession.py
+++ b/src/lib/python/isc/config/ccsession.py
@@ -91,6 +91,7 @@ COMMAND_CONFIG_UPDATE = "config_update"
COMMAND_MODULE_SPECIFICATION_UPDATE = "module_specification_update"
COMMAND_GET_COMMANDS_SPEC = "get_commands_spec"
+COMMAND_GET_STATISTICS_SPEC = "get_statistics_spec"
COMMAND_GET_CONFIG = "get_config"
COMMAND_SET_CONFIG = "set_config"
COMMAND_GET_MODULE_SPEC = "get_module_spec"
diff --git a/src/lib/python/isc/config/cfgmgr.py b/src/lib/python/isc/config/cfgmgr.py
index 18e001c..1db9fd3 100644
--- a/src/lib/python/isc/config/cfgmgr.py
+++ b/src/lib/python/isc/config/cfgmgr.py
@@ -267,6 +267,19 @@ class ConfigManager:
commands[module_name] = self.module_specs[module_name].get_commands_spec()
return commands
+ def get_statistics_spec(self, name = None):
+ """Returns a dict containing 'module_name': statistics_spec for
+ all modules. If name is specified, only that module will
+ be included"""
+ statistics = {}
+ if name:
+ if name in self.module_specs:
+ statistics[name] = self.module_specs[name].get_statistics_spec()
+ else:
+ for module_name in self.module_specs.keys():
+ statistics[module_name] = self.module_specs[module_name].get_statistics_spec()
+ return statistics
+
def read_config(self):
"""Read the current configuration from the file specificied at init()"""
try:
@@ -457,6 +470,8 @@ class ConfigManager:
if cmd:
if cmd == ccsession.COMMAND_GET_COMMANDS_SPEC:
answer = ccsession.create_answer(0, self.get_commands_spec())
+ elif cmd == ccsession.COMMAND_GET_STATISTICS_SPEC:
+ answer = ccsession.create_answer(0, self.get_statistics_spec())
elif cmd == ccsession.COMMAND_GET_MODULE_SPEC:
answer = self._handle_get_module_spec(arg)
elif cmd == ccsession.COMMAND_GET_CONFIG:
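With the new command wired into the config manager, a client module can ask for every registered statistics specification over the command channel, the same way get_commands_spec is handled above. A hedged sketch of the client side (the helper name and session variable are mine; create_command, parse_answer and the "ConfigManager" group come from the existing code):

    # Illustrative only: query the config manager for all statistics specs.
    from isc.config import ccsession

    def fetch_statistics_specs(cc_session):
        """Return the {'module_name': statistics_spec} dict from the
        config manager, or {} on error."""
        cmd = ccsession.create_command(ccsession.COMMAND_GET_STATISTICS_SPEC)
        seq = cc_session.group_sendmsg(cmd, "ConfigManager")
        answer, env = cc_session.group_recvmsg(False, seq)
        rcode, arg = ccsession.parse_answer(answer)
        return arg if rcode == 0 else {}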
diff --git a/src/lib/python/isc/config/module_spec.py b/src/lib/python/isc/config/module_spec.py
index 9aa49e0..b79f928 100644
--- a/src/lib/python/isc/config/module_spec.py
+++ b/src/lib/python/isc/config/module_spec.py
@@ -23,6 +23,7 @@
import json
import sys
+import time
import isc.cc.data
@@ -91,7 +92,7 @@ class ModuleSpec:
return _validate_spec_list(data_def, full, data, errors)
else:
# no spec, always bad
- if errors != None:
+ if errors is not None:
errors.append("No config_data specification")
return False
@@ -117,6 +118,26 @@ class ModuleSpec:
return False
+ def validate_statistics(self, full, stat, errors = None):
+ """Check whether the given piece of data conforms to this
+ data definition. If so, it returns True. If not, it will
+ return false. If errors is given, and is an array, a string
+ describing the error will be appended to it. The current
+ version stops as soon as there is one error so this list
+ will not be exhaustive. If 'full' is true, it also errors on
+ non-optional missing values. Set this to False if you want to
+ validate only a part of a statistics tree (like a list of
+ non-default values). Also it checks 'item_format' in case
+ of time"""
+ stat_spec = self.get_statistics_spec()
+ if stat_spec is not None:
+ return _validate_spec_list(stat_spec, full, stat, errors)
+ else:
+ # no spec, always bad
+ if errors is not None:
+ errors.append("No statistics specification")
+ return False
+
def get_module_name(self):
"""Returns a string containing the name of the module as
specified by the specification given at __init__()"""
@@ -152,6 +173,14 @@ class ModuleSpec:
else:
return None
+ def get_statistics_spec(self):
+ """Returns a dict representation of the statistics part of the
+ specification, or None if there is none."""
+ if 'statistics' in self._module_spec:
+ return self._module_spec['statistics']
+ else:
+ return None
+
def __str__(self):
"""Returns a string representation of the full specification"""
return self._module_spec.__str__()
@@ -160,8 +189,9 @@ def _check(module_spec):
"""Checks the full specification. This is a dict that contains the
element "module_spec", which is in itself a dict that
must contain at least a "module_name" (string) and optionally
- a "config_data" and a "commands" element, both of which are lists
- of dicts. Raises a ModuleSpecError if there is a problem."""
+ a "config_data", a "commands" and a "statistics" element, all
+ of which are lists of dicts. Raises a ModuleSpecError if there
+ is a problem."""
if type(module_spec) != dict:
raise ModuleSpecError("data specification not a dict")
if "module_name" not in module_spec:
@@ -173,6 +203,8 @@ def _check(module_spec):
_check_config_spec(module_spec["config_data"])
if "commands" in module_spec:
_check_command_spec(module_spec["commands"])
+ if "statistics" in module_spec:
+ _check_statistics_spec(module_spec["statistics"])
def _check_config_spec(config_data):
# config data is a list of items represented by dicts that contain
@@ -263,34 +295,75 @@ def _check_item_spec(config_item):
if type(map_item) != dict:
raise ModuleSpecError("map_item_spec element is not a dict")
_check_item_spec(map_item)
+ if 'item_format' in config_item and 'item_default' in config_item:
+ item_format = config_item["item_format"]
+ item_default = config_item["item_default"]
+ if not _check_format(item_default, item_format):
+ raise ModuleSpecError(
+ "Wrong format for " + str(item_default) + " in " + str(item_name))
+def _check_statistics_spec(statistics):
+ # statistics is a list of items represented by dicts that contain
+ # things like "item_name", depending on the type they can have
+ # specific subitems
+ """Checks a list that contains the statistics part of the
+ specification. Raises a ModuleSpecError if there is a
+ problem."""
+ if type(statistics) != list:
+ raise ModuleSpecError("statistics is of type " + str(type(statistics))
+ + ", not a list of items")
+ for stat_item in statistics:
+ _check_item_spec(stat_item)
+ # Additionally checks if there are 'item_title' and
+ # 'item_description'
+ for item in [ 'item_title', 'item_description' ]:
+ if item not in stat_item:
+ raise ModuleSpecError("no " + item + " in statistics item")
+
+def _check_format(value, format_name):
+ """Check if specified value and format are correct. Return True if
+ is is correct."""
+ # TODO: other format types should be added if necessary
+ time_formats = { 'date-time' : "%Y-%m-%dT%H:%M:%SZ",
+ 'date' : "%Y-%m-%d",
+ 'time' : "%H:%M:%S" }
+ for fmt in time_formats:
+ if format_name == fmt:
+ try:
+ # reverse check
+ return value == time.strftime(
+ time_formats[fmt],
+ time.strptime(value, time_formats[fmt]))
+ except (ValueError, TypeError):
+ break
+ return False
def _validate_type(spec, value, errors):
"""Returns true if the value is of the correct type given the
specification"""
data_type = spec['item_type']
if data_type == "integer" and type(value) != int:
- if errors != None:
+ if errors is not None:
errors.append(str(value) + " should be an integer")
return False
elif data_type == "real" and type(value) != float:
- if errors != None:
+ if errors is not None:
errors.append(str(value) + " should be a real")
return False
elif data_type == "boolean" and type(value) != bool:
- if errors != None:
+ if errors is not None:
errors.append(str(value) + " should be a boolean")
return False
elif data_type == "string" and type(value) != str:
- if errors != None:
+ if errors is not None:
errors.append(str(value) + " should be a string")
return False
elif data_type == "list" and type(value) != list:
- if errors != None:
+ if errors is not None:
errors.append(str(value) + " should be a list")
return False
elif data_type == "map" and type(value) != dict:
- if errors != None:
+ if errors is not None:
errors.append(str(value) + " should be a map")
return False
elif data_type == "named_set" and type(value) != dict:
@@ -300,6 +373,18 @@ def _validate_type(spec, value, errors):
else:
return True
+def _validate_format(spec, value, errors):
+ """Returns true if the value is of the correct format given the
+ specification. And also return true if no 'item_format'"""
+ if "item_format" in spec:
+ item_format = spec['item_format']
+ if not _check_format(value, item_format):
+ if errors is not None:
+ errors.append("format type of " + str(value)
+ + " should be " + item_format)
+ return False
+ return True
+
def _validate_item(spec, full, data, errors):
if not _validate_type(spec, data, errors):
return False
@@ -308,6 +393,8 @@ def _validate_item(spec, full, data, errors):
for data_el in data:
if not _validate_type(list_spec, data_el, errors):
return False
+ if not _validate_format(list_spec, data_el, errors):
+ return False
if list_spec['item_type'] == "map":
if not _validate_item(list_spec, full, data_el, errors):
return False
@@ -322,6 +409,8 @@ def _validate_item(spec, full, data, errors):
return False
if not _validate_item(named_set_spec, full, data_el, errors):
return False
+ elif not _validate_format(spec, data, errors):
+ return False
return True
def _validate_spec(spec, full, data, errors):
@@ -333,7 +422,7 @@ def _validate_spec(spec, full, data, errors):
elif item_name in data:
return _validate_item(spec, full, data[item_name], errors)
elif full and not item_optional:
- if errors != None:
+ if errors is not None:
errors.append("non-optional item " + item_name + " missing")
return False
else:
@@ -358,7 +447,7 @@ def _validate_spec_list(module_spec, full, data, errors):
if spec_item["item_name"] == item_name:
found = True
if not found and item_name != "version":
- if errors != None:
+ if errors is not None:
errors.append("unknown item " + item_name)
validated = False
return validated
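The statistics support added to module_spec.py is easiest to see with a small spec fragment: a statistics entry is an ordinary item spec plus the mandatory 'item_title' and 'item_description', and 'item_format' restricts string values to the date/time formats handled by _check_format(). The example below is illustrative only; the module and item names are made up and do not correspond to spec33.spec used by the tests.

    from isc.config.module_spec import ModuleSpec

    demo_spec = ModuleSpec({
        "module_name": "Demo",
        "statistics": [
            {
                "item_name": "last_update_time",
                "item_type": "string",
                "item_optional": False,
                "item_default": "1970-01-01T00:00:00Z",
                "item_title": "Last update time",
                "item_description": "Time of the last update",
                "item_format": "date-time"
            }
        ]
    })

    errors = []
    # True: the value matches the date-time format
    demo_spec.validate_statistics(
        True, {"last_update_time": "2011-05-27T19:42:57Z"}, errors)
    # False: "2011-05-27" fails the date-time check and an error is appended
    demo_spec.validate_statistics(
        True, {"last_update_time": "2011-05-27"}, errors)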
diff --git a/src/lib/python/isc/config/tests/cfgmgr_test.py b/src/lib/python/isc/config/tests/cfgmgr_test.py
index 0a9e2d3..eacc425 100644
--- a/src/lib/python/isc/config/tests/cfgmgr_test.py
+++ b/src/lib/python/isc/config/tests/cfgmgr_test.py
@@ -219,6 +219,25 @@ class TestConfigManager(unittest.TestCase):
commands_spec = self.cm.get_commands_spec('Spec2')
self.assertEqual(commands_spec['Spec2'], module_spec.get_commands_spec())
+ def test_get_statistics_spec(self):
+ statistics_spec = self.cm.get_statistics_spec()
+ self.assertEqual(statistics_spec, {})
+ module_spec = isc.config.module_spec.module_spec_from_file(self.data_path + os.sep + "spec1.spec")
+ self.assert_(module_spec.get_module_name() not in self.cm.module_specs)
+ self.cm.set_module_spec(module_spec)
+ self.assert_(module_spec.get_module_name() in self.cm.module_specs)
+ statistics_spec = self.cm.get_statistics_spec()
+ self.assertEqual(statistics_spec, { 'Spec1': None })
+ self.cm.remove_module_spec('Spec1')
+ module_spec = isc.config.module_spec.module_spec_from_file(self.data_path + os.sep + "spec2.spec")
+ self.assert_(module_spec.get_module_name() not in self.cm.module_specs)
+ self.cm.set_module_spec(module_spec)
+ self.assert_(module_spec.get_module_name() in self.cm.module_specs)
+ statistics_spec = self.cm.get_statistics_spec()
+ self.assertEqual(statistics_spec['Spec2'], module_spec.get_statistics_spec())
+ statistics_spec = self.cm.get_statistics_spec('Spec2')
+ self.assertEqual(statistics_spec['Spec2'], module_spec.get_statistics_spec())
+
def test_read_config(self):
self.assertEqual(self.cm.config.data, {'version': config_data.BIND10_CONFIG_DATA_VERSION})
self.cm.read_config()
@@ -241,6 +260,7 @@ class TestConfigManager(unittest.TestCase):
self._handle_msg_helper("", { 'result': [ 1, 'Unknown message format: ']})
self._handle_msg_helper({ "command": [ "badcommand" ] }, { 'result': [ 1, "Unknown command: badcommand"]})
self._handle_msg_helper({ "command": [ "get_commands_spec" ] }, { 'result': [ 0, {} ]})
+ self._handle_msg_helper({ "command": [ "get_statistics_spec" ] }, { 'result': [ 0, {} ]})
self._handle_msg_helper({ "command": [ "get_module_spec" ] }, { 'result': [ 0, {} ]})
self._handle_msg_helper({ "command": [ "get_module_spec", { "module_name": "Spec2" } ] }, { 'result': [ 0, {} ]})
#self._handle_msg_helper({ "command": [ "get_module_spec", { "module_name": "nosuchmodule" } ] },
@@ -329,6 +349,7 @@ class TestConfigManager(unittest.TestCase):
{ "module_name" : "Spec2" } ] },
{ 'result': [ 0, self.spec.get_full_spec() ] })
self._handle_msg_helper({ "command": [ "get_commands_spec" ] }, { 'result': [ 0, { self.spec.get_module_name(): self.spec.get_commands_spec() } ]})
+ self._handle_msg_helper({ "command": [ "get_statistics_spec" ] }, { 'result': [ 0, { self.spec.get_module_name(): self.spec.get_statistics_spec() } ]})
# re-add this once we have new way to propagate spec changes (1 instead of the current 2 messages)
#self.assertEqual(len(self.fake_session.message_queue), 2)
# the name here is actually wrong (and hardcoded), but needed in the current version
@@ -450,6 +471,7 @@ class TestConfigManager(unittest.TestCase):
def test_run(self):
self.fake_session.group_sendmsg({ "command": [ "get_commands_spec" ] }, "ConfigManager")
+ self.fake_session.group_sendmsg({ "command": [ "get_statistics_spec" ] }, "ConfigManager")
self.fake_session.group_sendmsg({ "command": [ "shutdown" ] }, "ConfigManager")
self.cm.run()
pass
diff --git a/src/lib/python/isc/config/tests/module_spec_test.py b/src/lib/python/isc/config/tests/module_spec_test.py
index be862c5..fc53d23 100644
--- a/src/lib/python/isc/config/tests/module_spec_test.py
+++ b/src/lib/python/isc/config/tests/module_spec_test.py
@@ -81,6 +81,11 @@ class TestModuleSpec(unittest.TestCase):
self.assertRaises(ModuleSpecError, self.read_spec_file, "spec20.spec")
self.assertRaises(ModuleSpecError, self.read_spec_file, "spec21.spec")
self.assertRaises(ModuleSpecError, self.read_spec_file, "spec26.spec")
+ self.assertRaises(ModuleSpecError, self.read_spec_file, "spec34.spec")
+ self.assertRaises(ModuleSpecError, self.read_spec_file, "spec35.spec")
+ self.assertRaises(ModuleSpecError, self.read_spec_file, "spec36.spec")
+ self.assertRaises(ModuleSpecError, self.read_spec_file, "spec37.spec")
+ self.assertRaises(ModuleSpecError, self.read_spec_file, "spec38.spec")
def validate_data(self, specfile_name, datafile_name):
dd = self.read_spec_file(specfile_name);
@@ -123,6 +128,17 @@ class TestModuleSpec(unittest.TestCase):
self.assertEqual(False, self.validate_command_params("spec27.spec", "data22_8.data", 'cmd1'))
self.assertEqual(False, self.validate_command_params("spec27.spec", "data22_8.data", 'cmd2'))
+ def test_statistics_validation(self):
+ def _validate_stat(specfile_name, datafile_name):
+ dd = self.read_spec_file(specfile_name);
+ data_file = open(self.spec_file(datafile_name))
+ data_str = data_file.read()
+ data = isc.cc.data.parse_value_str(data_str)
+ return dd.validate_statistics(True, data, [])
+ self.assertFalse(self.read_spec_file("spec1.spec").validate_statistics(True, None, None));
+ self.assertTrue(_validate_stat("spec33.spec", "data33_1.data"))
+ self.assertFalse(_validate_stat("spec33.spec", "data33_2.data"))
+
def test_init(self):
self.assertRaises(ModuleSpecError, ModuleSpec, 1)
module_spec = isc.config.module_spec_from_file(self.spec_file("spec1.spec"), False)
@@ -269,6 +285,80 @@ class TestModuleSpec(unittest.TestCase):
}
)
+ self.assertRaises(ModuleSpecError, isc.config.module_spec._check_item_spec,
+ { 'item_name': "a_datetime",
+ 'item_type': "string",
+ 'item_optional': False,
+ 'item_default': 1,
+ 'item_format': "date-time"
+ }
+ )
+
+ self.assertRaises(ModuleSpecError, isc.config.module_spec._check_item_spec,
+ { 'item_name': "a_date",
+ 'item_type': "string",
+ 'item_optional': False,
+ 'item_default': 1,
+ 'item_format': "date"
+ }
+ )
+
+ self.assertRaises(ModuleSpecError, isc.config.module_spec._check_item_spec,
+ { 'item_name': "a_time",
+ 'item_type': "string",
+ 'item_optional': False,
+ 'item_default': 1,
+ 'item_format': "time"
+ }
+ )
+
+ self.assertRaises(ModuleSpecError, isc.config.module_spec._check_item_spec,
+ { 'item_name': "a_datetime",
+ 'item_type': "string",
+ 'item_optional': False,
+ 'item_default': "2011-05-27T19:42:57Z",
+ 'item_format': "dummy-format"
+ }
+ )
+
+ self.assertRaises(ModuleSpecError, isc.config.module_spec._check_item_spec,
+ { 'item_name': "a_date",
+ 'item_type': "string",
+ 'item_optional': False,
+ 'item_default': "2011-05-27",
+ 'item_format': "dummy-format"
+ }
+ )
+
+ self.assertRaises(ModuleSpecError, isc.config.module_spec._check_item_spec,
+ { 'item_name': "a_time",
+ 'item_type': "string",
+ 'item_optional': False,
+ 'item_default': "19:42:57Z",
+ 'item_format': "dummy-format"
+ }
+ )
+
+ def test_check_format(self):
+ self.assertTrue(isc.config.module_spec._check_format('2011-05-27T19:42:57Z', 'date-time'))
+ self.assertTrue(isc.config.module_spec._check_format('2011-05-27', 'date'))
+ self.assertTrue(isc.config.module_spec._check_format('19:42:57', 'time'))
+ self.assertFalse(isc.config.module_spec._check_format('2011-05-27T19:42:57Z', 'dummy'))
+ self.assertFalse(isc.config.module_spec._check_format('2011-05-27', 'dummy'))
+ self.assertFalse(isc.config.module_spec._check_format('19:42:57', 'dummy'))
+ self.assertFalse(isc.config.module_spec._check_format('2011-13-99T99:99:99Z', 'date-time'))
+ self.assertFalse(isc.config.module_spec._check_format('2011-13-99', 'date'))
+ self.assertFalse(isc.config.module_spec._check_format('99:99:99', 'time'))
+ self.assertFalse(isc.config.module_spec._check_format('', 'date-time'))
+ self.assertFalse(isc.config.module_spec._check_format(None, 'date-time'))
+ self.assertFalse(isc.config.module_spec._check_format(None, None))
+ # wrong date-time-type format not ending with "Z"
+ self.assertFalse(isc.config.module_spec._check_format('2011-05-27T19:42:57', 'date-time'))
+ # wrong date-type format ending with "T"
+ self.assertFalse(isc.config.module_spec._check_format('2011-05-27T', 'date'))
+ # wrong time-type format ending with "Z"
+ self.assertFalse(isc.config.module_spec._check_format('19:42:57Z', 'time'))
+
def test_validate_type(self):
errors = []
self.assertEqual(True, isc.config.module_spec._validate_type({ 'item_type': 'integer' }, 1, errors))
@@ -306,6 +396,25 @@ class TestModuleSpec(unittest.TestCase):
self.assertEqual(False, isc.config.module_spec._validate_type({ 'item_type': 'map' }, 1, errors))
self.assertEqual(['1 should be a map'], errors)
+ def test_validate_format(self):
+ errors = []
+ self.assertEqual(True, isc.config.module_spec._validate_format({ 'item_format': 'date-time' }, "2011-05-27T19:42:57Z", errors))
+ self.assertEqual(False, isc.config.module_spec._validate_format({ 'item_format': 'date-time' }, "a", None))
+ self.assertEqual(False, isc.config.module_spec._validate_format({ 'item_format': 'date-time' }, "a", errors))
+ self.assertEqual(['format type of a should be date-time'], errors)
+
+ errors = []
+ self.assertEqual(True, isc.config.module_spec._validate_format({ 'item_format': 'date' }, "2011-05-27", errors))
+ self.assertEqual(False, isc.config.module_spec._validate_format({ 'item_format': 'date' }, "a", None))
+ self.assertEqual(False, isc.config.module_spec._validate_format({ 'item_format': 'date' }, "a", errors))
+ self.assertEqual(['format type of a should be date'], errors)
+
+ errors = []
+ self.assertEqual(True, isc.config.module_spec._validate_format({ 'item_format': 'time' }, "19:42:57", errors))
+ self.assertEqual(False, isc.config.module_spec._validate_format({ 'item_format': 'time' }, "a", None))
+ self.assertEqual(False, isc.config.module_spec._validate_format({ 'item_format': 'time' }, "a", errors))
+ self.assertEqual(['format type of a should be time'], errors)
+
def test_validate_spec(self):
spec = { 'item_name': "an_item",
'item_type': "string",
diff --git a/src/lib/util/filename.h b/src/lib/util/filename.h
index c9874ce..f625938 100644
--- a/src/lib/util/filename.h
+++ b/src/lib/util/filename.h
@@ -103,6 +103,11 @@ public:
return (extension_);
}
+ /// \return Name and extension of the given file name
+ std::string nameAndExtension() const {
+ return (name_ + extension_);
+ }
+
/// \brief Expand Name with Default
///
/// A default file specified is supplied and used to fill in any missing
diff --git a/src/lib/util/python/gen_wiredata.py.in b/src/lib/util/python/gen_wiredata.py.in
index 8e1f079..6a69c29 100755
--- a/src/lib/util/python/gen_wiredata.py.in
+++ b/src/lib/util/python/gen_wiredata.py.in
@@ -822,6 +822,27 @@ class RP(RR):
f.write('# MAILBOX=%s TEXT=%s\n' % (self.mailbox, self.text))
f.write('%s %s\n' % (mailbox_wire, text_wire))
+class AFSDB(RR):
+ '''Implements rendering AFSDB RDATA in the test data format.
+
+ Configurable parameters are as follows (see the description of the
+ same name of attribute for the default value):
+ - subtype (16 bit int): The subtype field.
+ - server (string): The server field.
+ The string must be interpreted as a valid domain name.
+ '''
+ subtype = 1
+ server = 'afsdb.example.com'
+ def dump(self, f):
+ server_wire = encode_name(self.server)
+ if self.rdlen is None:
+ self.rdlen = int(2 + len(server_wire) / 2)
+ else:
+ self.rdlen = int(self.rdlen)
+ self.dump_header(f, self.rdlen)
+ f.write('# SUBTYPE=%d SERVER=%s\n' % (self.subtype, self.server))
+ f.write('%04x %s\n' % (self.subtype, server_wire))
+
class NSECBASE(RR):
'''Implements rendering NSEC/NSEC3 type bitmaps commonly used for
these RRs. The NSEC and NSEC3 classes will be inherited from this
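The new AFSDB class above is what turns the afsdb sections of the .spec files added earlier into .wire test data: gen_wiredata reads the sections listed under [custom], and any keys in an [afsdb] section override the class attributes before dump() renders the record. A hypothetical spec (not one of the files added by this change) could look like:

    [custom]
    sections: afsdb
    [afsdb]
    subtype: 3
    server: big.example.org

With rdlen left unset, dump() computes the RDLENGTH as 2 octets for the subtype plus the length of the encoded server name, exactly as in the code above.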
diff --git a/src/lib/util/tests/filename_unittest.cc b/src/lib/util/tests/filename_unittest.cc
index be29ff1..07f3525 100644
--- a/src/lib/util/tests/filename_unittest.cc
+++ b/src/lib/util/tests/filename_unittest.cc
@@ -51,42 +51,49 @@ TEST_F(FilenameTest, Components) {
EXPECT_EQ("/alpha/beta/", fname.directory());
EXPECT_EQ("gamma", fname.name());
EXPECT_EQ(".delta", fname.extension());
+ EXPECT_EQ("gamma.delta", fname.nameAndExtension());
// Directory only
fname.setName("/gamma/delta/");
EXPECT_EQ("/gamma/delta/", fname.directory());
EXPECT_EQ("", fname.name());
EXPECT_EQ("", fname.extension());
+ EXPECT_EQ("", fname.nameAndExtension());
// Filename only
fname.setName("epsilon");
EXPECT_EQ("", fname.directory());
EXPECT_EQ("epsilon", fname.name());
EXPECT_EQ("", fname.extension());
+ EXPECT_EQ("epsilon", fname.nameAndExtension());
// Extension only
fname.setName(".zeta");
EXPECT_EQ("", fname.directory());
EXPECT_EQ("", fname.name());
EXPECT_EQ(".zeta", fname.extension());
+ EXPECT_EQ(".zeta", fname.nameAndExtension());
// Missing directory
fname.setName("eta.theta");
EXPECT_EQ("", fname.directory());
EXPECT_EQ("eta", fname.name());
EXPECT_EQ(".theta", fname.extension());
+ EXPECT_EQ("eta.theta", fname.nameAndExtension());
// Missing filename
fname.setName("/iota/.kappa");
EXPECT_EQ("/iota/", fname.directory());
EXPECT_EQ("", fname.name());
EXPECT_EQ(".kappa", fname.extension());
+ EXPECT_EQ(".kappa", fname.nameAndExtension());
// Missing extension
fname.setName("lambda/mu/nu");
EXPECT_EQ("lambda/mu/", fname.directory());
EXPECT_EQ("nu", fname.name());
EXPECT_EQ("", fname.extension());
+ EXPECT_EQ("nu", fname.nameAndExtension());
// Check that the decomposition can occur in the presence of leading and
// trailing spaces
@@ -94,18 +101,21 @@ TEST_F(FilenameTest, Components) {
EXPECT_EQ("lambda/mu/", fname.directory());
EXPECT_EQ("nu", fname.name());
EXPECT_EQ("", fname.extension());
+ EXPECT_EQ("nu", fname.nameAndExtension());
// Empty string
fname.setName("");
EXPECT_EQ("", fname.directory());
EXPECT_EQ("", fname.name());
EXPECT_EQ("", fname.extension());
+ EXPECT_EQ("", fname.nameAndExtension());
// ... and just spaces
fname.setName(" ");
EXPECT_EQ("", fname.directory());
EXPECT_EQ("", fname.name());
EXPECT_EQ("", fname.extension());
+ EXPECT_EQ("", fname.nameAndExtension());
// Check corner cases - where separators are present, but strings are
// absent.
@@ -113,16 +123,19 @@ TEST_F(FilenameTest, Components) {
EXPECT_EQ("/", fname.directory());
EXPECT_EQ("", fname.name());
EXPECT_EQ("", fname.extension());
+ EXPECT_EQ("", fname.nameAndExtension());
fname.setName(".");
EXPECT_EQ("", fname.directory());
EXPECT_EQ("", fname.name());
EXPECT_EQ(".", fname.extension());
+ EXPECT_EQ(".", fname.nameAndExtension());
fname.setName("/.");
EXPECT_EQ("/", fname.directory());
EXPECT_EQ("", fname.name());
EXPECT_EQ(".", fname.extension());
+ EXPECT_EQ(".", fname.nameAndExtension());
// Note that the space is a valid filename here; only leading and trailing
// spaces should be trimmed.
@@ -130,11 +143,13 @@ TEST_F(FilenameTest, Components) {
EXPECT_EQ("/", fname.directory());
EXPECT_EQ(" ", fname.name());
EXPECT_EQ(".", fname.extension());
+ EXPECT_EQ(" .", fname.nameAndExtension());
fname.setName(" / . ");
EXPECT_EQ("/", fname.directory());
EXPECT_EQ(" ", fname.name());
EXPECT_EQ(".", fname.extension());
+ EXPECT_EQ(" .", fname.nameAndExtension());
}
// Check that the expansion with a default works.