BIND 10 trac1035, updated. 8730224ec6ab50b04360caeb4ea72529574911fd Merge branch 'master' into trac1035
BIND 10 source code commits
bind10-changes at lists.isc.org
Tue Jun 28 11:50:02 UTC 2011
The branch, trac1035 has been updated
via 8730224ec6ab50b04360caeb4ea72529574911fd (commit)
via d7c2d4ab317636ed434265d885a433c8e669db10 (commit)
via 3c3ef0445aefddccf5cebab6edb347536e3adc71 (commit)
via 4ad7f97c789c2e2747506b7220bc279898aadeb4 (commit)
via e0744372924442ec75809d3964e917680c57a2ce (commit)
via 499ca0aa022f72674c29a2c66050a5cf1ee9f192 (commit)
via 376df1c9ac466742afad6681ff2e431ebfd75b63 (commit)
via b3a9a4228b95713636958a12f625434ef3cc3277 (commit)
via fb993ba8c52dca4a3a261e319ed095e5af8db15a (commit)
via cac876d0c8ab31aa9007411aacb5ef6ecda398a0 (commit)
via 1cbcbc84256489b664fe90553d3bb3579a33946c (commit)
via f9245031dcdecba55204916535555ea20374878a (commit)
via f9b5323ae8c8ffd7d4d2b69c360dc497b935d6de (commit)
via 3bb777140c5bc99775d5b8e0ea55711e227f0012 (commit)
via 8ec67a677e0ee2ecab48d112c3c5f5a5c5753543 (commit)
via 041c3ec8a0768c513131f47467652ab2aa75a07a (commit)
via ab31e2fbf10950084d9cda73c0b4fc7d36296817 (commit)
via 95707bad7adcc6963baebc1b7e3b005d1b8e316b (commit)
via 373bed0a95cbe38e67282e7ccca8cdd8fc2372f0 (commit)
via a01f7fc667d5fe05428231479d8e934673b40407 (commit)
via 5355f8f14648fddf13cda7240530e7b4216da671 (commit)
via dcdabc780fce8d02c9263f8e98f03b29bb4e5210 (commit)
via 0fad7d4a8557741f953eda9fed1d351a3d9dc5ef (commit)
via 3e861eb6aec036b3c5a2f6a71c6ff3adbdc9a55a (commit)
via 502100d7b9cd9d2300e78826a3bddd024ef38a74 (commit)
via b477df5d4dbce5b72ebd183b83555f62aa3fcec5 (commit)
via 701c0d6d7c484c2f46951d23fba47c760363b7e4 (commit)
via 5d5173ef0cc48d206464b39f696d03bae9daecea (commit)
via 7cc074aacd5159778111fa4cbdbe1c89e6a4e51b (commit)
via dc087934c1a1946cfdcf63b49a70aa0fefe6b282 (commit)
via 6f7998f9a209e9dd7b3ac80793098dfd81b489b9 (commit)
via feed2b3537a4e57e4cb55232242c6622d1fcc654 (commit)
via 21a333f512f2a11ce0c770b7d72aacfb623d0c14 (commit)
via fb032e397153a63e4f1bd3b9b7fc1a89c01e7d6f (commit)
via 9395f12c95a2519803a0dc15b56424c22df88c84 (commit)
via d57f30ffe93b7f45aa6492ea1fba5d594adc01df (commit)
via 690dafd743f765f04b21d3ce15ec0a63da6a53bd (commit)
via 251a32a1fd1e7be23d59790e57a4b40fbcdceae3 (commit)
via 6bc6c57d5761ccd2ef65291e81bbfd995b4758a9 (commit)
via 0f1b7a45520517a40b7b85d57d461e20e81b7aa9 (commit)
via 885a4ecf9c87b8e3a028b6488b0e6b853365edc8 (commit)
via 8d5a5b95c85af1f15654fe164f306fe21065ea73 (commit)
via 77367a5d67709b65afd8689159e5192416326cb7 (commit)
via 935bd760ed4f39213f8db8eab730bf41dc217da9 (commit)
via 52adf933c0bed4753a06632b25a46055d23eb655 (commit)
via 4fe29ae03d1ff8f6d721b42f4bb356702110c4e0 (commit)
via e3fa282a59eea69c50dcb9354e568a8503510511 (commit)
via 58df861a260fdf06b17194e224fb8c1bd03f0392 (commit)
via 77e3f8cf3f3fe79c7dd5f92f30d70c47b515f4cd (commit)
via 4a88c75d4d1decc3b3d5518bd12d592c118a7fd5 (commit)
via ea1b177b5503687f974252d185a9543066af20ed (commit)
via 6535d4fbe623226171b27730f60161436d0433e4 (commit)
via 0f4c693c3399bd9ecf2d2a5682fda8ed1eb8158f (commit)
via 877e89713ad2398b6637b843a22c3b12607fe5bb (commit)
via 33e08ca107c127d5c158882e7f2e86770a48c572 (commit)
via 32fb3ad97a7ccc65ef391b84c8f488d4ea71e963 (commit)
via 04e7fe3f480462d288c17bd121a368b74292cfd3 (commit)
via 354fcf46bf93f1e2e317043f2998a8b17f22fe04 (commit)
via 21acce853a4269f0db76dc2768bb7c5107b1b7d4 (commit)
via c021505a1a0d6ecb15a8fd1592b94baff6d115f4 (commit)
via 02aa9813c1f6829bb9089400c5397f3faba7d9e0 (commit)
via 3017593b63f34c4bc69494be8c80327eaad5d922 (commit)
via 62bc6cce6fe7343c4ef06c7e690939fd0aa20148 (commit)
via 77c17d3f03de64646da89de238288a22c49e3eb5 (commit)
via 6f8383136ae83eb439c71a70c4bde83524b72c5e (commit)
via a16c7925f9a00f44680e2ca984def99d6bb3cecf (commit)
via 12c37af78f65301858be28679695a9e818270947 (commit)
via c58fa9e4c5aa486bb270681a45a4f0f7e04b4139 (commit)
via 89324744df3f73de1beaefb9420aeab5f9ff7824 (commit)
via f9070aee950581a47c0916cb1f3b48cd4bfcb7f4 (commit)
via 0c1589a0842cefe0793b538c53c1cb102080b570 (commit)
via 166b4747ffadbc6b3a94647f1470ba776aeb8c51 (commit)
via 181283a52982eaf9f8637bd09a2e1dfaef5ce302 (commit)
via a46b0d4ab62e16c4096bcb8790659bee93205470 (commit)
via aba816f40efe336b20ae56871a531c87117ad24c (commit)
via 217c09751aab2dc84f49e7942b2c081a0381945f (commit)
via ea15d26afc9ced4a11aea6733ea3df0969c5618b (commit)
via f685e5c06c382180eb1775bce714ea60154b08f2 (commit)
via 5a19ee14367d9bb796c8e43c034ee9f327052c86 (commit)
via f92d30bb55decf9ed4d7cdf10231dfe2913ca11a (commit)
via 461a9d0a1e896e0a1b676c6873f74404d5ab95c1 (commit)
via bc81810505f7263aedb8654d139510058c251626 (commit)
via b57c51e6ddfc6770d5c66eab5aeb1a5238e5a7ea (commit)
via ddb1b2241fc03a1d08dea42907ee8f859d3b2f46 (commit)
via 0b838ba0d3c60203a52d1a918333846116e607cb (commit)
via f77021d7838e33e1662b42776ccc49be4435b1f2 (commit)
via 632cd6151b871e060d09a79f6b8a283cc0ab469c (commit)
via 7358d4af5775ee1bfa6099f63443d2ad27347f0d (commit)
via 81a2df84a879ca5cbaaa61dffce5c413d920011d (commit)
via 59b380d3682bb9fca26cae2c70c6c49934823f01 (commit)
via 8b2247a6ae88fbf16bfd65852feb0216a4ea4dac (commit)
via 1b01a9d09e5ecf21ff8bd9cce1c20372846a775c (commit)
via 735f817c7f66813135b4ef576c117aa424a5bdad (commit)
via fef88019d325474471a353304499e7919023912e (commit)
via 99522dd887762e71cbf4d895486f0e2f915eabda (commit)
via 999736efa5e3aaf06949675c4f77e1ef9cd0d71b (commit)
via 9c862cc45629b24d0a704926d339796926c692e5 (commit)
via 85d5708e2c44e04b1a148610434de2c040d7142b (commit)
via e6b3d50483fb739da2ca83e493a1c30043ba0464 (commit)
via fc29e92af2bd2cfe8fa77dd311b9382680fd6324 (commit)
via 78cffeb00933814658da0867ada0209403946b51 (commit)
via 9129a474d3289157a4d8eb761383352dbfc2586e (commit)
via 417893fc06dcd5339e2cd0278a6badbbe847d6c4 (commit)
via dd4c4405f56ebbcb74d8f792ad528daf9b2bc79a (commit)
via 8e715d5202d79361622e89ef11a0d433558768f8 (commit)
via 5ac59583a36f1d83d35ba8d159f87bb281d3edc5 (commit)
via 1368c87b932734919bc0f392b351651cc6dd03d7 (commit)
via 79ec6c320ec5c24036856fd6b589ba8bf8b26ffc (commit)
via 8f5fafa643f2d908b9e97b6d08aeb55c4b96addf (commit)
via 01f9c1c0adfb37d11133c87056161f1edfba2672 (commit)
via ac7aaa887d827f8bdf1c2881d245cc655c6847b7 (commit)
via ebb6493b8ff763d42fe99438c8befe48c381b4aa (commit)
via c786a61641a965545c2e304b1c946afdedc6dc1a (commit)
via 1efa5d9d7f699cc3ee636d4e1b50b3fb3a863180 (commit)
via e5251c4886f626e6ef9f6ba82771c0e949e0071f (commit)
via aaad42c52aed2c3890378511ecb2f97a3731d23a (commit)
via 4beebf47805d0c3f80872e8f690f09c1658ae4e2 (commit)
via 792c8b202cffc8fed726f10b3514523b1fc92469 (commit)
via 8c624c6644563ed9c4fecec8b0b5f5dd115fe7ef (commit)
via d1c7f98e910bd19d21a649386f1a8066e4f41677 (commit)
via a90c8a06056300e0f9f5ffdae72b8a2ba26346fc (commit)
via 30570ab2d917dc6adec02ba272ee50c17124b688 (commit)
via 59908b70a929baf829202197d6e7ab5a3557da32 (commit)
via 585d1c63d6d0126607f424571e38a4a60683cf4b (commit)
via d335ae50bb855b7b302dab852005385c0227dcfb (commit)
via 8034dbfe87c45eaa2c0aef0e715b86fa79a7c4e3 (commit)
via 0ddf0f5fa4d9d18599a1642b9f87caaa1f463c5e (commit)
via 5a75094dfdd5f2307c4a1669e05db70355b08682 (commit)
via df5bad72ac8dac07a038f29823a1938bc9bbe72c (commit)
via 4e025223cedb89d5dff5c250ab3cab42bfeb195e (commit)
via d4ef4b9a0cc72eb9d85c6fef4aeb4b2f90b2b590 (commit)
via e560672e5119540d6e6860c177a9b969e5a71fb1 (commit)
via ebc9aa4dc554ec8aced4413b47a0668f3f5f1da1 (commit)
via ddd40bde5412d11fb4d320958f26572797442b74 (commit)
via 4565ca4899e702da0c515e11d614cddb3f483a7d (commit)
via ca924dafc7902bbc2a22660fb00f70c0d34c6471 (commit)
via 679d8390f4fb1253ac26a86a47a9279f3d88174e (commit)
via 250100ecf6468ad2cbc47663d1f6e83f1fe10f9a (commit)
via 5aabfb971d4338d3e488d05f8c06a9db973ede5d (commit)
via e9a1e75b3d83fe811d9a4e32d6d9a21f446a37d3 (commit)
via 9f2167d3b5878a5709fd9f1ba2cd200f29f057c8 (commit)
via 61b01087195d5d1f875f01c5fd2eac5dc61d012d (commit)
via 84fcd68d77cc4aba23721e234622c33666e96c49 (commit)
via 406cb1fd4af84fcbdf8339cf1afdae2cfb3b7946 (commit)
via 55689c559b3ac60765940d64a5b51007f94bddf7 (commit)
via 925ac83b98b02abec3f7f2a70b7c83170f851e29 (commit)
via 3f47015eab1abd9c7193a9e740f794c6a718c9f7 (commit)
via 4064b389d13d2861083499517f51d89492156099 (commit)
via 926985d03e3486f1a83615dc2794d310cb2cb520 (commit)
via 189f58f73fe02cf2729ab26d6ce8ab6469e82a1c (commit)
via 1ab0f2e8448a20674bfb8d12d463e5b3fec3ac6e (commit)
via dcb32f7928972c3ebe66f13a08560a1e19c62866 (commit)
via e25099da714a10dd3bc24be0002f9174fb9610c9 (commit)
via a6e68091deaf13986355b8763c7348b2da71d7d5 (commit)
via 362d429a32fecd1b59f309466e098935242f9054 (commit)
via 6c5a9b252b7bc062ed807aff342d0314811b5bde (commit)
via 8320629b004d5fc8194afb5d277a0d9e01299121 (commit)
via 75bda54b2b5cdf06f334e72cd554b616a887d1cf (commit)
via 2b97fc4f4f30bff13b94ad9b25766b4a6b2f6655 (commit)
via 8bb79638bc658d8e57b15ae1b16d28a08ec06a69 (commit)
via 81b49bb4d72fdfb5db8d7ad5f9b086c489acdb86 (commit)
via eeebde9d81c4bbc4e5388db5cd6148ca3589b91e (commit)
via 8c5e6268927737a472348d1ff8ecb2201c76b98a (commit)
via cda19a7cbc56ddd67c7d19ec7d072a64477d254b (commit)
via c65177c8ea0dfba3aaa84ea1bf2583b2d818d23d (commit)
via 430f3e516852ee9a8af655626bbab16b03e4cf72 (commit)
via 1b47d1cf3e0bc8e3c6166d049070dda2f298c7bf (commit)
via d86526508726fc2941a7d35730013b75f49ab4a5 (commit)
via a2158e5b2c17043f0f3aa194009408aa73bd62ce (commit)
via 4f87326ae6c17e26769b4ae276001b49d5bb3561 (commit)
via 06c9c2a763326d4b30ff9448f726928538fba94c (commit)
via 8d07954792d35120580d9d94fedd642d4797cd53 (commit)
via bc8bb2e13305ae879b31a6accbd3f5f1855bf327 (commit)
via 112aa5ce69ec2440db83d89196144b782f064564 (commit)
via 287edb431de6ae5d7106dd4e593a193908b9ba9f (commit)
via 99d7c21284686ba3d021a6d09938b82ea56de783 (commit)
via 309b24ff461b623770e950d6ff12654241bdd39b (commit)
via 52d165984d1a7784a1a6e0a3b845b19559698203 (commit)
via 67a88d3fd748cc42730e142cbfa79d0b7fb7a813 (commit)
via 42177c3c2dabc2b46adb7133374a4c2da46b1f9e (commit)
via c1ebc31d07e2c04c0158fbd3e7289db650b41c1d (commit)
via ad3f4a5e40390f14762648986dae8430760202c2 (commit)
via d3a000535b506ae8af54119af3dde7d14509654e (commit)
via 2048643593b3e9c8f34af40bbd00342c2c0c1318 (commit)
via bfd50c768ccf03b2e4f3d3ecbeb5fb344ff79129 (commit)
via ae21ebb0f609f8a2aa8ffc3d4b84c465111ec2c3 (commit)
via 7cf66b7e44e389205ae4344764fbf136550854ce (commit)
via 0c3b69c6e170bee7dd775090af2bdd1cae900080 (commit)
via f5edd310465966137f0cd4e2109d90f7e5d5965f (commit)
via 73ac6b09eeeebcdb03965076d4aa8a8a7a361ebe (commit)
via 86a307f08882d02ad443e848e096a30ca14ec918 (commit)
from ebf9a139d91ee05950e045fcbf2bf7acffe1755b (commit)
Those revisions listed above that are new to this repository have
not appeared on any other notification email; so we list those
revisions in full, below.
- Log -----------------------------------------------------------------
commit 8730224ec6ab50b04360caeb4ea72529574911fd
Merge: ebf9a139d91ee05950e045fcbf2bf7acffe1755b d7c2d4ab317636ed434265d885a433c8e669db10
Author: Stephen Morris <stephen at isc.org>
Date: Tue Jun 28 12:31:01 2011 +0100
Merge branch 'master' into trac1035
-----------------------------------------------------------------------
Summary of changes:
ChangeLog | 47 +-
doc/guide/Makefile.am | 18 +-
doc/guide/bind10-guide.html | 90 +-
doc/guide/bind10-guide.xml | 86 +
doc/guide/bind10-messages.html | 841 ++++++++
doc/guide/bind10-messages.xml | 2018 ++++++++++++++++++++
src/bin/auth/Makefile.am | 14 +-
src/bin/auth/auth_log.cc | 26 +
src/bin/auth/auth_log.h | 54 +
src/bin/auth/auth_messages.mes | 260 +++
src/bin/auth/auth_srv.cc | 173 +--
src/bin/auth/auth_srv.h | 21 -
src/bin/auth/benchmarks/Makefile.am | 3 +
src/bin/auth/command.cc | 37 +-
src/bin/auth/main.cc | 51 +-
src/bin/auth/statistics.cc | 35 +-
src/bin/auth/statistics.h | 7 +-
src/bin/auth/tests/Makefile.am | 4 +
src/bin/auth/tests/auth_srv_unittest.cc | 9 -
src/bin/auth/tests/statistics_unittest.cc | 3 +-
src/bin/cfgmgr/plugins/b10logging.py | 19 +-
src/bin/cfgmgr/plugins/tests/Makefile.am | 2 +-
src/bin/cfgmgr/plugins/tests/logging_test.py | 135 ++
src/bin/resolver/Makefile.am | 12 +-
src/bin/resolver/main.cc | 12 +-
src/bin/resolver/resolver.cc | 270 ++-
src/bin/resolver/resolver.h | 31 +
src/bin/resolver/resolver.spec.pre.in | 35 +
src/bin/resolver/resolver_log.h | 2 +-
src/bin/resolver/resolver_messages.mes | 219 +++
src/bin/resolver/resolverdef.mes | 193 --
src/bin/resolver/tests/Makefile.am | 4 +-
src/bin/resolver/tests/resolver_config_unittest.cc | 169 ++-
src/bin/resolver/tests/resolver_unittest.cc | 61 +-
.../resolver/tests/response_scrubber_unittest.cc | 6 +
src/bin/xfrin/Makefile.am | 11 +-
src/bin/xfrin/xfrin.py.in | 60 +-
src/bin/xfrin/xfrin_messages.mes | 91 +
src/bin/xfrout/Makefile.am | 10 +-
src/bin/xfrout/xfrout.py.in | 74 +-
src/bin/xfrout/xfrout_messages.mes | 140 ++
src/lib/Makefile.am | 2 +-
src/lib/acl/Makefile.am | 16 +-
src/lib/acl/dns.cc | 34 +
src/lib/acl/dns.h | 89 +
src/lib/acl/ip_check.cc | 141 ++
src/lib/acl/ip_check.h | 417 ++++
src/lib/acl/loader.h | 64 +-
src/lib/acl/logic_check.h | 206 ++
src/lib/acl/tests/Makefile.am | 19 +-
src/lib/acl/tests/acl_test.cc | 8 +-
src/lib/acl/tests/creators.h | 158 ++
src/lib/acl/tests/dns_test.cc | 35 +
src/lib/acl/tests/ip_check_unittest.cc | 640 +++++++
src/lib/acl/tests/loader_test.cc | 245 +--
src/lib/acl/tests/logcheck.h | 22 +-
src/lib/acl/tests/logic_check_test.cc | 245 +++
src/lib/acl/tests/run_unittests.cc | 3 +-
src/lib/asiodns/Makefile.am | 12 +-
src/lib/asiodns/asiodef.mes | 56 -
src/lib/asiodns/asiodns_messages.mes | 56 +
src/lib/asiodns/io_fetch.cc | 28 +-
src/lib/asiolink/io_endpoint.h | 44 +
src/lib/asiolink/tcp_endpoint.h | 8 +
src/lib/asiolink/tests/io_endpoint_unittest.cc | 204 ++-
src/lib/asiolink/udp_endpoint.h | 8 +
src/lib/cc/data.h | 2 +-
src/lib/config/Makefile.am | 12 +-
src/lib/config/ccsession.cc | 60 +-
src/lib/config/ccsession.h | 37 +-
src/lib/config/config_log.h | 2 +-
src/lib/config/config_messages.mes | 59 +
src/lib/config/configdef.mes | 57 -
src/lib/config/tests/ccsession_unittests.cc | 62 +
src/lib/datasrc/Makefile.am | 12 +-
src/lib/datasrc/datasrc_messages.mes | 493 +++++
src/lib/datasrc/logger.h | 2 +-
src/lib/datasrc/messagedef.mes | 494 -----
src/lib/log/Makefile.am | 8 +-
src/lib/log/README | 321 ++--
src/lib/log/compiler/message.cc | 10 +-
src/lib/log/impldef.cc | 29 -
src/lib/log/impldef.h | 18 -
src/lib/log/impldef.mes | 38 -
src/lib/log/log_messages.cc | 63 +
src/lib/log/log_messages.h | 35 +
src/lib/log/log_messages.mes | 146 ++
src/lib/log/logger_level.cc | 4 +-
src/lib/log/logger_level_impl.cc | 8 +-
src/lib/log/logger_manager.cc | 17 +-
src/lib/log/logger_manager_impl.cc | 14 +-
src/lib/log/logimpl_messages.cc | 29 +
src/lib/log/logimpl_messages.h | 18 +
src/lib/log/logimpl_messages.mes | 43 +
src/lib/log/message_reader.cc | 28 +-
src/lib/log/messagedef.cc | 63 -
src/lib/log/messagedef.h | 35 -
src/lib/log/messagedef.mes | 131 --
src/lib/log/output_option.cc | 11 +-
src/lib/log/tests/destination_test.sh.in | 24 +-
src/lib/log/tests/local_file_test.sh.in | 38 +-
src/lib/log/tests/logger_example.cc | 32 +-
src/lib/log/tests/logger_level_unittest.cc | 2 +-
src/lib/log/tests/logger_manager_unittest.cc | 38 +-
src/lib/log/tests/logger_support_unittest.cc | 6 +-
src/lib/log/tests/logger_unittest.cc | 2 +-
src/lib/log/tests/message_dictionary_unittest.cc | 4 +-
src/lib/log/tests/message_reader_unittest.cc | 24 +-
src/lib/log/tests/severity_test.sh.in | 52 +-
src/lib/python/isc/notify/notify_out.py | 100 +-
src/lib/python/isc/notify/tests/notify_out_test.py | 32 +-
src/lib/server_common/Makefile.am | 4 +-
src/lib/server_common/client.cc | 75 +
src/lib/server_common/client.h | 165 ++
src/lib/server_common/tests/Makefile.am | 3 +
src/lib/server_common/tests/client_unittest.cc | 127 ++
tools/system_messages.py | 413 ++++
117 files changed, 9445 insertions(+), 2170 deletions(-)
create mode 100644 doc/guide/bind10-messages.html
create mode 100644 doc/guide/bind10-messages.xml
create mode 100644 src/bin/auth/auth_log.cc
create mode 100644 src/bin/auth/auth_log.h
create mode 100644 src/bin/auth/auth_messages.mes
create mode 100644 src/bin/cfgmgr/plugins/tests/logging_test.py
create mode 100644 src/bin/resolver/resolver_messages.mes
delete mode 100644 src/bin/resolver/resolverdef.mes
create mode 100644 src/bin/xfrin/xfrin_messages.mes
create mode 100644 src/bin/xfrout/xfrout_messages.mes
create mode 100644 src/lib/acl/dns.cc
create mode 100644 src/lib/acl/dns.h
create mode 100644 src/lib/acl/ip_check.cc
create mode 100644 src/lib/acl/ip_check.h
create mode 100644 src/lib/acl/logic_check.h
create mode 100644 src/lib/acl/tests/creators.h
create mode 100644 src/lib/acl/tests/dns_test.cc
create mode 100644 src/lib/acl/tests/ip_check_unittest.cc
create mode 100644 src/lib/acl/tests/logic_check_test.cc
delete mode 100644 src/lib/asiodns/asiodef.mes
create mode 100644 src/lib/asiodns/asiodns_messages.mes
create mode 100644 src/lib/config/config_messages.mes
delete mode 100644 src/lib/config/configdef.mes
create mode 100644 src/lib/datasrc/datasrc_messages.mes
delete mode 100644 src/lib/datasrc/messagedef.mes
delete mode 100644 src/lib/log/impldef.cc
delete mode 100644 src/lib/log/impldef.h
delete mode 100644 src/lib/log/impldef.mes
create mode 100644 src/lib/log/log_messages.cc
create mode 100644 src/lib/log/log_messages.h
create mode 100644 src/lib/log/log_messages.mes
create mode 100644 src/lib/log/logimpl_messages.cc
create mode 100644 src/lib/log/logimpl_messages.h
create mode 100644 src/lib/log/logimpl_messages.mes
delete mode 100644 src/lib/log/messagedef.cc
delete mode 100644 src/lib/log/messagedef.h
delete mode 100644 src/lib/log/messagedef.mes
create mode 100644 src/lib/server_common/client.cc
create mode 100644 src/lib/server_common/client.h
create mode 100644 src/lib/server_common/tests/client_unittest.cc
create mode 100644 tools/system_messages.py
-----------------------------------------------------------------------
diff --git a/ChangeLog b/ChangeLog
index 3b1d518..4616678 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,13 +1,52 @@
-260. [func] stephen
+265. [func]* jinmei
+ b10-resolver: Introduced ACL on incoming queries. By default the
+ resolver accepts queries from ::1 and 127.0.0.1 and rejects all
+ others. The ACL can be configured with bindctl via the
+ "Resolver/query_acl" parameter. For example, to accept queries
+ from 192.0.2.0/24 (in addition to the default list), do this:
+ > config add Resolver/query_acl
+ > config set Resolver/query_acl[2]/action "ACCEPT"
+ > config set Resolver/query_acl[2]/from "192.0.2.0/24"
+ > config commit
+ (Trac #999, git e0744372924442ec75809d3964e917680c57a2ce,
+ also based on other ACL related work done by stephen and vorner)
+
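For illustration only: the query ACL described in entry 265 is a first-match list in which each entry pairs an action with a source address or prefix. The sketch below shows that idea in Python; it is not b10-resolver's implementation, and the helper names are invented for this example.

    # Illustrative sketch only -- not b10-resolver's code. First-match ACL
    # evaluation over entries shaped like the "Resolver/query_acl" items above.
    import ipaddress

    DEFAULT_ACL = [
        {"action": "ACCEPT", "from": "::1"},
        {"action": "ACCEPT", "from": "127.0.0.1"},
    ]

    def acl_action(client_addr, acl, default="REJECT"):
        """Return the action of the first ACL entry matching client_addr."""
        addr = ipaddress.ip_address(client_addr)
        for entry in acl:
            network = ipaddress.ip_network(entry["from"], strict=False)
            if addr.version == network.version and addr in network:
                return entry["action"]
        return default

    # With the extra entry added as in the bindctl example, 192.0.2.0/24 is accepted:
    acl = DEFAULT_ACL + [{"action": "ACCEPT", "from": "192.0.2.0/24"}]
    print(acl_action("192.0.2.10", acl))    # ACCEPT
    print(acl_action("198.51.100.1", acl))  # REJECT (default)
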
+264. [bug] jerry
+ b10-xfrout: fixed a busy loop in its notify-out subthread. Due to
+ the loop, the thread previously woke up every 0.5 seconds throughout
+ most of the lifetime of b10-xfrout, wasting the corresponding CPU
+ time.
+ (Trac #1001, git fb993ba8c52dca4a3a261e319ed095e5af8db15a)
+
+263. [func] jelte
+ Logging configuration can now also accept a * as a first-level
+ name (e.g. '*', or '*.cache'), indicating that every module
+ should use that configuration, unless overridden by an explicit
+ logging configuration for that module
+ (Trac #1004, git 0fad7d4a8557741f953eda9fed1d351a3d9dc5ef)
+
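Again purely as a sketch (this is not the b10logging plugin, and the precedence shown is an assumption): a '*' first-level name acts as a fallback that an explicit per-module logging configuration overrides.

    # Illustrative sketch only -- not BIND 10's b10logging plugin. Shows how a
    # '*' first-level logger name could serve as a fallback, with an explicit
    # entry for a module taking precedence; the exact precedence is assumed.
    def effective_config(logger_name, configs):
        """Pick the settings for e.g. 'Resolver.cache' from name-keyed entries."""
        module, _, rest = logger_name.partition(".")
        candidates = [logger_name, module]       # explicit names win
        if rest:
            candidates.append("*." + rest)       # wildcard module, same sub-logger
        candidates.append("*")                   # catch-all
        for name in candidates:
            if name in configs:
                return configs[name]
        return None

    configs = {"*": {"severity": "INFO"}, "*.cache": {"severity": "DEBUG"}}
    print(effective_config("Resolver.cache", configs))  # {'severity': 'DEBUG'}
    print(effective_config("Xfrout", configs))          # {'severity': 'INFO'}
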
+262. [func] stephen
+ Add some initial documentation about the logging framework.
+ Provide BIND 10 Messages Manual in HTML and DocBook XML formats.
+ This provides all the log message descriptions in a single document.
+ A developer tool, tools/system_messages.py (available in git repo),
+ was written to generate this.
+ (Trac #1012, git 502100d7b9cd9d2300e78826a3bddd024ef38a74)
+
+261. [func] stephen
+ Add new-style logging messages to b10-auth.
+ (Trac #738, git c021505a1a0d6ecb15a8fd1592b94baff6d115f4)
+
+260. [func] stephen
Remove comma between message identification and the message
text in the new-style logging messages.
- (Trac 1031, git 1c7930a7ba19706d388e4f8dcf2a55a886b74cd2)
+ (Trac #1031, git 1c7930a7ba19706d388e4f8dcf2a55a886b74cd2)
-259. [bug] stephen
+259. [bug] stephen
Logging now correctly initialized in b10-auth. Also, fixed
bug whereby querying for "version.bind txt ch" would cause
b10-auth to crash if BIND 10 was started with the "-v" switch.
- (Trac 1022,1023, git 926a65fa08617be677a93e9e388df0f229b01067)
+ (Trac #1022,#1023, git 926a65fa08617be677a93e9e388df0f229b01067)
258. [build] jelte
Now builds and runs with Python 3.2
diff --git a/doc/guide/Makefile.am b/doc/guide/Makefile.am
index c790139..c84ad06 100644
--- a/doc/guide/Makefile.am
+++ b/doc/guide/Makefile.am
@@ -1,10 +1,12 @@
EXTRA_DIST = bind10-guide.css
-EXTRA_DIST += bind10-guide.html
-EXTRA_DIST += bind10-guide.xml
+EXTRA_DIST += bind10-guide.xml bind10-guide.html
+EXTRA_DIST += bind10-messages.xml bind10-messages.html
# This is not a "man" manual, but reuse this for now for docbook.
if ENABLE_MAN
+.PHONY: bind10-messages.xml
+
bind10-guide.html: bind10-guide.xml
xsltproc --novalid --xinclude --nonet \
--path $(top_builddir)/doc \
@@ -13,4 +15,16 @@ bind10-guide.html: bind10-guide.xml
http://docbook.sourceforge.net/release/xsl/current/html/docbook.xsl \
$(srcdir)/bind10-guide.xml
+bind10-messages.html: bind10-messages.xml
+ xsltproc --novalid --xinclude --nonet \
+ --path $(top_builddir)/doc \
+ -o $@ \
+ --stringparam html.stylesheet $(srcdir)/bind10-guide.css \
+ http://docbook.sourceforge.net/release/xsl/current/html/docbook.xsl \
+ $(srcdir)/bind10-messages.xml
+
+# So many dependencies that it's easiest just to regenerate it every time
+bind10-messages.xml:
+ $(PYTHON) $(top_srcdir)/tools/system_messages.py -o $@ $(top_srcdir)
+
endif
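
The bind10-messages.xml rule above runs tools/system_messages.py over the whole source tree. As a rough sketch of that kind of collection (this is not the actual tool, and the .mes layout is assumed from the generated manual: a "% IDENTIFIER message text" line followed by free-form description lines):

    # Illustrative sketch only -- a much-simplified stand-in for
    # tools/system_messages.py. Assumes each .mes entry starts with a line of
    # the form "% IDENTIFIER message text" and its description runs until the
    # next "%" line; "$..." directive lines are skipped.
    import glob
    import os

    def parse_mes(path):
        """Yield (identifier, message_text, description) tuples from one file."""
        ident, text, desc = None, None, []
        with open(path, encoding="utf-8") as mes:
            for raw in mes:
                line = raw.rstrip()
                if line.startswith("$"):
                    continue
                if line.startswith("%"):
                    if ident:
                        yield ident, text, " ".join(desc)
                    ident, _, text = line[1:].strip().partition(" ")
                    desc = []
                elif ident and line:
                    desc.append(line.strip())
        if ident:
            yield ident, text, " ".join(desc)

    def collect(root):
        """Gather all messages under a source tree, sorted by identifier."""
        entries = []
        for path in glob.glob(os.path.join(root, "**", "*.mes"), recursive=True):
            entries.extend(parse_mes(path))
        return sorted(entries)

    for ident, text, desc in collect("."):
        print(ident, text)
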
diff --git a/doc/guide/bind10-guide.html b/doc/guide/bind10-guide.html
index 5b127f3..5754cf0 100644
--- a/doc/guide/bind10-guide.html
+++ b/doc/guide/bind10-guide.html
@@ -1,12 +1,12 @@
<html><head><meta http-equiv="Content-Type" content="text/html; charset=ISO-8859-1"><title>BIND 10 Guide</title><link rel="stylesheet" href="./bind10-guide.css" type="text/css"><meta name="generator" content="DocBook XSL Stylesheets V1.75.2"><meta name="description" content="BIND 10 is a Domain Name System (DNS) suite managed by Internet Systems Consortium (ISC). It includes DNS libraries and modular components for controlling authoritative and recursive DNS servers. This is the reference guide for BIND 10 version 20110519. The most up-to-date version of this document, along with other documents for BIND 10, can be found at ."></head><body bgcolor="white" text="black" link="#0000FF" vlink="#840084" alink="#0000FF"><div class="book" title="BIND 10 Guide"><div class="titlepage"><div><div><h1 class="title"><a name="id1168230298903"></a>BIND 10 Guide</h1></div><div><h2 class="subtitle">Administrator Reference for BIND 10</h2></div><div><p class="releaseinfo">This is the referenc
e guide for BIND 10 version
- 20110519.</p></div><div><p class="copyright">Copyright © 2010-2011 Internet Systems Consortium, Inc.</p></div><div><div class="abstract" title="Abstract"><p class="title"><b>Abstract</b></p><p>BIND 10 is a Domain Name System (DNS) suite managed by
+ 20110519.</p></div><div><p class="copyright">Copyright © 2010 Internet Systems Consortium, Inc.</p></div><div><div class="abstract" title="Abstract"><p class="title"><b>Abstract</b></p><p>BIND 10 is a Domain Name System (DNS) suite managed by
Internet Systems Consortium (ISC). It includes DNS libraries
and modular components for controlling authoritative and
recursive DNS servers.
</p><p>
This is the reference guide for BIND 10 version 20110519.
The most up-to-date version of this document, along with
- other documents for BIND 10, can be found at <a class="ulink" href="http://bind10.isc.org/docs" target="_top">http://bind10.isc.org/docs</a>. </p></div></div></div><hr></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="chapter"><a href="#intro">1. Introduction</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168230299038">Supported Platforms</a></span></dt><dt><span class="section"><a href="#id1168230299065">Required Software</a></span></dt><dt><span class="section"><a href="#starting_stopping">Starting and Stopping the Server</a></span></dt><dt><span class="section"><a href="#managing_once_running">Managing BIND 10</a></span></dt></dl></dd><dt><span class="chapter"><a href="#installation">2. Installation</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168230284848">Building Requirements</a></span></dt><dt><span class="section"><a href="#quickstart">Quick start</a></span></dt><dt><span class="section"><a href="#install">In
stallation from source</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168230285037">Download Tar File</a></span></dt><dt><span class="section"><a href="#id1168230285057">Retrieve from Git</a></span></dt><dt><span class="section"><a href="#id1168230285117">Configure before the build</a></span></dt><dt><span class="section"><a href="#id1168230285216">Build</a></span></dt><dt><span class="section"><a href="#id1168230285230">Install</a></span></dt><dt><span class="section"><a href="#id1168230285254">Install Hierarchy</a></span></dt></dl></dd></dl></dd><dt><span class="chapter"><a href="#bind10">3. Starting BIND10 with <span class="command"><strong>bind10</strong></span></a></span></dt><dd><dl><dt><span class="section"><a href="#start">Starting BIND 10</a></span></dt></dl></dd><dt><span class="chapter"><a href="#msgq">4. Command channel</a></span></dt><dt><span class="chapter"><a href="#cfgmgr">5. Configuration manager</a></span></dt><dt><span class="chapter"><a hr
ef="#cmdctl">6. Remote control daemon</a></span></dt><dd><dl><dt><span class="section"><a href="#cmdctl.spec">Configuration specification for b10-cmdctl</a></span></dt></dl></dd><dt><span class="chapter"><a href="#bindctl">7. Control and configure user interface</a></span></dt><dt><span class="chapter"><a href="#authserver">8. Authoritative Server</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168230285829">Server Configurations</a></span></dt><dt><span class="section"><a href="#id1168230285894">Data Source Backends</a></span></dt><dt><span class="section"><a href="#id1168230285924">Loading Master Zones Files</a></span></dt></dl></dd><dt><span class="chapter"><a href="#xfrin">9. Incoming Zone Transfers</a></span></dt><dt><span class="chapter"><a href="#xfrout">10. Outbound Zone Transfers</a></span></dt><dt><span class="chapter"><a href="#zonemgr">11. Secondary Manager</a></span></dt><dt><span class="chapter"><a href="#resolverserver">12. Recursive Name Server<
/a></span></dt><dd><dl><dt><span class="section"><a href="#id1168230286380">Forwarding</a></span></dt></dl></dd><dt><span class="chapter"><a href="#statistics">13. Statistics</a></span></dt></dl></div><div class="chapter" title="Chapter 1. Introduction"><div class="titlepage"><div><div><h2 class="title"><a name="intro"></a>Chapter 1. Introduction</h2></div></div></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="section"><a href="#id1168230299038">Supported Platforms</a></span></dt><dt><span class="section"><a href="#id1168230299065">Required Software</a></span></dt><dt><span class="section"><a href="#starting_stopping">Starting and Stopping the Server</a></span></dt><dt><span class="section"><a href="#managing_once_running">Managing BIND 10</a></span></dt></dl></div><p>
+ other documents for BIND 10, can be found at <a class="ulink" href="http://bind10.isc.org/docs" target="_top">http://bind10.isc.org/docs</a>. </p></div></div></div><hr></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="chapter"><a href="#intro">1. Introduction</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168230299038">Supported Platforms</a></span></dt><dt><span class="section"><a href="#id1168230299065">Required Software</a></span></dt><dt><span class="section"><a href="#starting_stopping">Starting and Stopping the Server</a></span></dt><dt><span class="section"><a href="#managing_once_running">Managing BIND 10</a></span></dt></dl></dd><dt><span class="chapter"><a href="#installation">2. Installation</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168230284846">Building Requirements</a></span></dt><dt><span class="section"><a href="#quickstart">Quick start</a></span></dt><dt><span class="section"><a href="#install">In
stallation from source</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168230285026">Download Tar File</a></span></dt><dt><span class="section"><a href="#id1168230285045">Retrieve from Git</a></span></dt><dt><span class="section"><a href="#id1168230285106">Configure before the build</a></span></dt><dt><span class="section"><a href="#id1168230285203">Build</a></span></dt><dt><span class="section"><a href="#id1168230285219">Install</a></span></dt><dt><span class="section"><a href="#id1168230285242">Install Hierarchy</a></span></dt></dl></dd></dl></dd><dt><span class="chapter"><a href="#bind10">3. Starting BIND10 with <span class="command"><strong>bind10</strong></span></a></span></dt><dd><dl><dt><span class="section"><a href="#start">Starting BIND 10</a></span></dt></dl></dd><dt><span class="chapter"><a href="#msgq">4. Command channel</a></span></dt><dt><span class="chapter"><a href="#cfgmgr">5. Configuration manager</a></span></dt><dt><span class="chapter"><a hr
ef="#cmdctl">6. Remote control daemon</a></span></dt><dd><dl><dt><span class="section"><a href="#cmdctl.spec">Configuration specification for b10-cmdctl</a></span></dt></dl></dd><dt><span class="chapter"><a href="#bindctl">7. Control and configure user interface</a></span></dt><dt><span class="chapter"><a href="#authserver">8. Authoritative Server</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168230285816">Server Configurations</a></span></dt><dt><span class="section"><a href="#id1168230285881">Data Source Backends</a></span></dt><dt><span class="section"><a href="#id1168230285912">Loading Master Zones Files</a></span></dt></dl></dd><dt><span class="chapter"><a href="#xfrin">9. Incoming Zone Transfers</a></span></dt><dt><span class="chapter"><a href="#xfrout">10. Outbound Zone Transfers</a></span></dt><dt><span class="chapter"><a href="#zonemgr">11. Secondary Manager</a></span></dt><dt><span class="chapter"><a href="#resolverserver">12. Recursive Name Server<
/a></span></dt><dd><dl><dt><span class="section"><a href="#id1168230286300">Forwarding</a></span></dt></dl></dd><dt><span class="chapter"><a href="#statistics">13. Statistics</a></span></dt><dt><span class="chapter"><a href="#logging">14. Logging</a></span></dt></dl></div><div class="chapter" title="Chapter 1. Introduction"><div class="titlepage"><div><div><h2 class="title"><a name="intro"></a>Chapter 1. Introduction</h2></div></div></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="section"><a href="#id1168230299038">Supported Platforms</a></span></dt><dt><span class="section"><a href="#id1168230299065">Required Software</a></span></dt><dt><span class="section"><a href="#starting_stopping">Starting and Stopping the Server</a></span></dt><dt><span class="section"><a href="#managing_once_running">Managing BIND 10</a></span></dt></dl></div><p>
BIND is the popular implementation of a DNS server, developer
interfaces, and DNS tools.
BIND 10 is a rewrite of BIND 9. BIND 10 is written in C++ and Python
@@ -33,10 +33,8 @@
3.1 is the minimum version which will work.
</p><p>
BIND 10 uses the Botan crypto library for C++. It requires
- at least Botan version 1.8.
- </p><p>
- BIND 10 uses the log4cplus C++ logging library. It requires
- at least log4cplus version 1.0.3.
+ at least Botan version 1.8. To build BIND 10, install the
+ Botan libraries and development include headers.
</p><p>
The authoritative server requires SQLite 3.3.9 or newer.
The <span class="command"><strong>b10-xfrin</strong></span>, <span class="command"><strong>b10-xfrout</strong></span>,
@@ -138,10 +136,7 @@
and, of course, DNS. These include detailed developer
documentation and code examples.
- </p></div><div class="chapter" title="Chapter 2. Installation"><div class="titlepage"><div><div><h2 class="title"><a name="installation"></a>Chapter 2. Installation</h2></div></div></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="section"><a href="#id1168230284848">Building Requirements</a></span></dt><dt><span class="section"><a href="#quickstart">Quick start</a></span></dt><dt><span class="section"><a href="#install">Installation from source</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168230285037">Download Tar File</a></span></dt><dt><span class="section"><a href="#id1168230285057">Retrieve from Git</a></span></dt><dt><span class="section"><a href="#id1168230285117">Configure before the build</a></span></dt><dt><span class="section"><a href="#id1168230285216">Build</a></span></dt><dt><span class="section"><a href="#id1168230285230">Install</a></span></dt><dt><span class="section"><a href="#id1168230285254">Install Hierarchy<
/a></span></dt></dl></dd></dl></div><div class="section" title="Building Requirements"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168230284848"></a>Building Requirements</h2></div></div></div><p>
- In addition to the run-time requirements, building BIND 10
- from source code requires various development include headers.
- </p><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>
+ </p></div><div class="chapter" title="Chapter 2. Installation"><div class="titlepage"><div><div><h2 class="title"><a name="installation"></a>Chapter 2. Installation</h2></div></div></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="section"><a href="#id1168230284846">Building Requirements</a></span></dt><dt><span class="section"><a href="#quickstart">Quick start</a></span></dt><dt><span class="section"><a href="#install">Installation from source</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168230285026">Download Tar File</a></span></dt><dt><span class="section"><a href="#id1168230285045">Retrieve from Git</a></span></dt><dt><span class="section"><a href="#id1168230285106">Configure before the build</a></span></dt><dt><span class="section"><a href="#id1168230285203">Build</a></span></dt><dt><span class="section"><a href="#id1168230285219">Install</a></span></dt><dt><span class="section"><a href="#id1168230285242">Install Hierarchy<
/a></span></dt></dl></dd></dl></div><div class="section" title="Building Requirements"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168230284846"></a>Building Requirements</h2></div></div></div><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>
Some operating systems have split their distribution packages into
a run-time and a development package. You will need to install
the development package versions, which include header files and
@@ -152,11 +147,6 @@
</p><p>
- To build BIND 10, also install the Botan (at least version
- 1.8) and the log4cplus (at least version 1.0.3)
- development include headers.
- </p><p>
-
The Python Library and Python _sqlite3 module are required to
enable the Xfrout and Xfrin support.
</p><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>
@@ -166,7 +156,7 @@
Building BIND 10 also requires a C++ compiler and
standard development headers, make, and pkg-config.
BIND 10 builds have been tested with GCC g++ 3.4.3, 4.1.2,
- 4.1.3, 4.2.1, 4.3.2, and 4.4.1; Clang++ 2.8; and Sun C++ 5.10.
+ 4.1.3, 4.2.1, 4.3.2, and 4.4.1.
</p></div><div class="section" title="Quick start"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="quickstart"></a>Quick start</h2></div></div></div><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>
This quickly covers the standard steps for installing
and deploying BIND 10 as an authoritative name server using
@@ -202,14 +192,14 @@
the Git code revision control system or as a downloadable
tar file. It may also be available in pre-compiled ready-to-use
packages from operating system vendors.
- </p><div class="section" title="Download Tar File"><div class="titlepage"><div><div><h3 class="title"><a name="id1168230285037"></a>Download Tar File</h3></div></div></div><p>
+ </p><div class="section" title="Download Tar File"><div class="titlepage"><div><div><h3 class="title"><a name="id1168230285026"></a>Download Tar File</h3></div></div></div><p>
Downloading a release tar file is the recommended method to
obtain the source code.
</p><p>
The BIND 10 releases are available as tar file downloads from
<a class="ulink" href="ftp://ftp.isc.org/isc/bind10/" target="_top">ftp://ftp.isc.org/isc/bind10/</a>.
Periodic development snapshots may also be available.
- </p></div><div class="section" title="Retrieve from Git"><div class="titlepage"><div><div><h3 class="title"><a name="id1168230285057"></a>Retrieve from Git</h3></div></div></div><p>
+ </p></div><div class="section" title="Retrieve from Git"><div class="titlepage"><div><div><h3 class="title"><a name="id1168230285045"></a>Retrieve from Git</h3></div></div></div><p>
Downloading this "bleeding edge" code is recommended only for
developers or advanced users. Using development code in a production
environment is not recommended.
@@ -243,7 +233,7 @@
<span class="command"><strong>autoheader</strong></span>,
<span class="command"><strong>automake</strong></span>,
and related commands.
- </p></div><div class="section" title="Configure before the build"><div class="titlepage"><div><div><h3 class="title"><a name="id1168230285117"></a>Configure before the build</h3></div></div></div><p>
+ </p></div><div class="section" title="Configure before the build"><div class="titlepage"><div><div><h3 class="title"><a name="id1168230285106"></a>Configure before the build</h3></div></div></div><p>
BIND 10 uses the GNU Build System to discover build environment
details.
To generate the makefiles using the defaults, simply run:
@@ -274,16 +264,16 @@
</p><p>
If the configure fails, it may be due to missing or old
dependencies.
- </p></div><div class="section" title="Build"><div class="titlepage"><div><div><h3 class="title"><a name="id1168230285216"></a>Build</h3></div></div></div><p>
+ </p></div><div class="section" title="Build"><div class="titlepage"><div><div><h3 class="title"><a name="id1168230285203"></a>Build</h3></div></div></div><p>
After the configure step is complete, to build the executables
from the C++ code and prepare the Python scripts, run:
</p><pre class="screen">$ <strong class="userinput"><code>make</code></strong></pre><p>
- </p></div><div class="section" title="Install"><div class="titlepage"><div><div><h3 class="title"><a name="id1168230285230"></a>Install</h3></div></div></div><p>
+ </p></div><div class="section" title="Install"><div class="titlepage"><div><div><h3 class="title"><a name="id1168230285219"></a>Install</h3></div></div></div><p>
To install the BIND 10 executables, support files,
and documentation, run:
</p><pre class="screen">$ <strong class="userinput"><code>make install</code></strong></pre><p>
- </p><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>The install step may require superuser privileges.</p></div></div><div class="section" title="Install Hierarchy"><div class="titlepage"><div><div><h3 class="title"><a name="id1168230285254"></a>Install Hierarchy</h3></div></div></div><p>
+ </p><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>The install step may require superuser privileges.</p></div></div><div class="section" title="Install Hierarchy"><div class="titlepage"><div><div><h3 class="title"><a name="id1168230285242"></a>Install Hierarchy</h3></div></div></div><p>
The following is the layout of the complete BIND 10 installation:
</p><div class="itemizedlist"><ul class="itemizedlist" type="disc"><li class="listitem">
<code class="filename">bin/</code> —
@@ -500,12 +490,12 @@ shutdown
the details and relays (over a <span class="command"><strong>b10-msgq</strong></span> command
channel) the configuration on to the specified module.
</p><p>
- </p></div><div class="chapter" title="Chapter 8. Authoritative Server"><div class="titlepage"><div><div><h2 class="title"><a name="authserver"></a>Chapter 8. Authoritative Server</h2></div></div></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="section"><a href="#id1168230285829">Server Configurations</a></span></dt><dt><span class="section"><a href="#id1168230285894">Data Source Backends</a></span></dt><dt><span class="section"><a href="#id1168230285924">Loading Master Zones Files</a></span></dt></dl></div><p>
+ </p></div><div class="chapter" title="Chapter 8. Authoritative Server"><div class="titlepage"><div><div><h2 class="title"><a name="authserver"></a>Chapter 8. Authoritative Server</h2></div></div></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="section"><a href="#id1168230285816">Server Configurations</a></span></dt><dt><span class="section"><a href="#id1168230285881">Data Source Backends</a></span></dt><dt><span class="section"><a href="#id1168230285912">Loading Master Zones Files</a></span></dt></dl></div><p>
The <span class="command"><strong>b10-auth</strong></span> is the authoritative DNS server.
It supports EDNS0 and DNSSEC. It supports IPv6.
Normally it is started by the <span class="command"><strong>bind10</strong></span> master
process.
- </p><div class="section" title="Server Configurations"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168230285829"></a>Server Configurations</h2></div></div></div><p>
+ </p><div class="section" title="Server Configurations"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168230285816"></a>Server Configurations</h2></div></div></div><p>
<span class="command"><strong>b10-auth</strong></span> is configured via the
<span class="command"><strong>b10-cfgmgr</strong></span> configuration manager.
The module name is <span class="quote">“<span class="quote">Auth</span>”</span>.
@@ -525,7 +515,7 @@ This may be a temporary setting until then.
</p><div class="variablelist"><dl><dt><span class="term">shutdown</span></dt><dd>Stop the authoritative DNS server.
</dd></dl></div><p>
- </p></div><div class="section" title="Data Source Backends"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168230285894"></a>Data Source Backends</h2></div></div></div><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>
+ </p></div><div class="section" title="Data Source Backends"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168230285881"></a>Data Source Backends</h2></div></div></div><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>
For the development prototype release, <span class="command"><strong>b10-auth</strong></span>
supports a SQLite3 data source backend and in-memory data source
backend.
@@ -539,7 +529,7 @@ This may be a temporary setting until then.
The default is <code class="filename">/usr/local/var/</code>.)
This data file location may be changed by defining the
<span class="quote">“<span class="quote">database_file</span>”</span> configuration.
- </p></div><div class="section" title="Loading Master Zones Files"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168230285924"></a>Loading Master Zones Files</h2></div></div></div><p>
+ </p></div><div class="section" title="Loading Master Zones Files"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168230285912"></a>Loading Master Zones Files</h2></div></div></div><p>
RFC 1035 style DNS master zone files may be imported
into a BIND 10 data source by using the
<span class="command"><strong>b10-loadzone</strong></span> utility.
@@ -617,7 +607,7 @@ This may be a temporary setting until then.
</p><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>
Access control (such as allowing notifies) is not yet provided.
The primary/secondary service is not yet complete.
- </p></div></div><div class="chapter" title="Chapter 12. Recursive Name Server"><div class="titlepage"><div><div><h2 class="title"><a name="resolverserver"></a>Chapter 12. Recursive Name Server</h2></div></div></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="section"><a href="#id1168230286380">Forwarding</a></span></dt></dl></div><p>
+ </p></div></div><div class="chapter" title="Chapter 12. Recursive Name Server"><div class="titlepage"><div><div><h2 class="title"><a name="resolverserver"></a>Chapter 12. Recursive Name Server</h2></div></div></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="section"><a href="#id1168230286300">Forwarding</a></span></dt></dl></div><p>
The <span class="command"><strong>b10-resolver</strong></span> process is started by
<span class="command"><strong>bind10</strong></span>.
@@ -646,7 +636,7 @@ This may be a temporary setting until then.
> <strong class="userinput"><code>config set Resolver/listen_on [{ "address": "127.0.0.1", "port": 53 }]</code></strong>
> <strong class="userinput"><code>config commit</code></strong>
</pre><p>
- </p><div class="section" title="Forwarding"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168230286380"></a>Forwarding</h2></div></div></div><p>
+ </p><div class="section" title="Forwarding"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168230286300"></a>Forwarding</h2></div></div></div><p>
To enable forwarding, the upstream address and port must be
configured to forward queries to, such as:
@@ -694,4 +684,48 @@ This may be a temporary setting until then.
"stats.timestamp": 1295543046.823504
}
</pre><p>
+ </p></div><div class="chapter" title="Chapter 14. Logging"><div class="titlepage"><div><div><h2 class="title"><a name="logging"></a>Chapter 14. Logging</h2></div></div></div><p>
+ Each message written by BIND 10 to the configured logging destinations
+ comprises a number of components that identify the origin of the
+ message and, if the message indicates a problem, information about the
+ problem that may be useful in fixing it.
+ </p><p>
+ Consider the message below logged to a file:
+ </p><pre class="screen">2011-06-15 13:48:22.034 ERROR [b10-resolver.asiolink]
+ ASIODNS_OPENSOCK error 111 opening TCP socket to 127.0.0.1(53)</pre><p>
+ </p><p>
+ Note: the layout of messages written to the system logging
+ file (syslog) may be slightly different. This message has
+ been split across two lines here for display reasons; in the
+ logging file, it will appear on one line.
+ </p><p>
+ The log message comprises a number of components:
+
+ </p><div class="variablelist"><dl><dt><span class="term">2011-06-15 13:48:22.034</span></dt><dd><p>
+ The date and time at which the message was generated.
+ </p></dd><dt><span class="term">ERROR</span></dt><dd><p>
+ The severity of the message.
+ </p></dd><dt><span class="term">[b10-resolver.asiolink]</span></dt><dd><p>
+ The source of the message. This comprises two components:
+ the BIND 10 process generating the message (in this
+ case, <span class="command"><strong>b10-resolver</strong></span>) and the module
+ within the program from which the message originated
+ (which in the example is the asynchronous I/O link
+ module, asiolink).
+ </p></dd><dt><span class="term">ASIODNS_OPENSOCK</span></dt><dd><p>
+ The message identification. Every message in BIND 10
+ has a unique identification, which can be used as an
+ index into the <a class="ulink" href="bind10-messages.html" target="_top"><em class="citetitle">BIND 10 Messages
+ Manual</em></a> (<a class="ulink" href="http://bind10.isc.org/docs/bind10-messages.html" target="_top">http://bind10.isc.org/docs/bind10-messages.html</a>) from which more information can be obtained.
+ </p></dd><dt><span class="term">error 111 opening TCP socket to 127.0.0.1(53)</span></dt><dd><p>
+ A brief description of the cause of the problem. Within this text,
+ information relating to the condition that caused the message to
+ be logged will be included. In this example, error number 111
+ (an operating system-specific error number) was encountered when
+ trying to open a TCP connection to port 53 on the local system
+ (address 127.0.0.1). The next step would be to find out the reason
+ for the failure by consulting your system's documentation to
+ identify what error number 111 means.
+ </p></dd></dl></div><p>
+
</p></div></div></body></html>
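
The logging chapter added above breaks a message into timestamp, severity, source, message identification and text. Assuming the single-line layout described in the note, the example line can be pulled apart as follows; the regular expression is an illustration, not something shipped with BIND 10.

    # Illustration of the log-line anatomy described above; the pattern assumes
    # the "date time SEVERITY [process.module] MSGID text" layout and is not
    # taken from BIND 10 itself.
    import re

    LOG_LINE = re.compile(
        r"^(?P<timestamp>\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}\.\d{3}) "
        r"(?P<severity>\w+) "
        r"\[(?P<process>[^.\]]+)\.(?P<module>[^\]]+)\] "
        r"(?P<msgid>\S+) "
        r"(?P<text>.*)$"
    )

    line = ("2011-06-15 13:48:22.034 ERROR [b10-resolver.asiolink] "
            "ASIODNS_OPENSOCK error 111 opening TCP socket to 127.0.0.1(53)")

    match = LOG_LINE.match(line)
    if match:
        for part in ("timestamp", "severity", "process", "module", "msgid", "text"):
            print(f"{part:9s} {match.group(part)}")
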
diff --git a/doc/guide/bind10-guide.xml b/doc/guide/bind10-guide.xml
index eb6fea0..7d1a006 100644
--- a/doc/guide/bind10-guide.xml
+++ b/doc/guide/bind10-guide.xml
@@ -1450,6 +1450,92 @@ then change those defaults with config set Resolver/forward_addresses[0]/address
</chapter>
+ <chapter id="logging">
+ <title>Logging</title>
+
+<!-- TODO: how to configure logging, logging destinations etc. -->
+
+ <para>
+ Each message written by BIND 10 to the configured logging destinations
+ comprises a number of components that identify the origin of the
+ message and, if the message indicates a problem, information about the
+ problem that may be useful in fixing it.
+ </para>
+
+ <para>
+ Consider the message below logged to a file:
+ <screen>2011-06-15 13:48:22.034 ERROR [b10-resolver.asiolink]
+ ASIODNS_OPENSOCK error 111 opening TCP socket to 127.0.0.1(53)</screen>
+ </para>
+
+ <para>
+ Note: the layout of messages written to the system logging
+ file (syslog) may be slightly different. This message has
+ been split across two lines here for display reasons; in the
+ logging file, it will appear on one line.
+ </para>
+
+ <para>
+ The log message comprises a number of components:
+
+ <variablelist>
+ <varlistentry>
+ <term>2011-06-15 13:48:22.034</term>
+ <listitem><para>
+ The date and time at which the message was generated.
+ </para></listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term>ERROR</term>
+ <listitem><para>
+ The severity of the message.
+ </para></listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term>[b10-resolver.asiolink]</term>
+ <listitem><para>
+ The source of the message. This comprises two components:
+ the BIND 10 process generating the message (in this
+ case, <command>b10-resolver</command>) and the module
+ within the program from which the message originated
+ (which in the example is the asynchronous I/O link
+ module, asiolink).
+ </para></listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term>ASIODNS_OPENSOCK</term>
+ <listitem><para>
+ The message identification. Every message in BIND 10
+ has a unique identification, which can be used as an
+ index into the <ulink
+ url="bind10-messages.html"><citetitle>BIND 10 Messages
+ Manual</citetitle></ulink> (<ulink
+ url="http://bind10.isc.org/docs/bind10-messages.html"
+ />) from which more information can be obtained.
+ </para></listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term>error 111 opening TCP socket to 127.0.0.1(53)</term>
+ <listitem><para>
+ A brief description of the cause of the problem. Within this text,
+ information relating to the condition that caused the message to
+ be logged will be included. In this example, error number 111
+ (an operating system-specific error number) was encountered when
+ trying to open a TCP connection to port 53 on the local system
+ (address 127.0.0.1). The next step would be to find out the reason
+ for the failure by consulting your system's documentation to
+ identify what error number 111 means.
+ </para></listitem>
+ </varlistentry>
+ </variablelist>
+
+ </para>
+ </chapter>
+
<!-- TODO: how to help: run unit tests, join lists, review trac tickets -->
<!-- <index> <title>Index</title> </index> -->
diff --git a/doc/guide/bind10-messages.html b/doc/guide/bind10-messages.html
new file mode 100644
index 0000000..b075e96
--- /dev/null
+++ b/doc/guide/bind10-messages.html
@@ -0,0 +1,841 @@
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=ISO-8859-1"><title>BIND 10 Messages Manual</title><link rel="stylesheet" href="./bind10-guide.css" type="text/css"><meta name="generator" content="DocBook XSL Stylesheets V1.75.2"><meta name="description" content="BIND 10 is a Domain Name System (DNS) suite managed by Internet Systems Consortium (ISC). It includes DNS libraries and modular components for controlling authoritative and recursive DNS servers. This is the messages manual for BIND 10 version 20110519. The most up-to-date version of this document, along with other documents for BIND 10, can be found at ."></head><body bgcolor="white" text="black" link="#0000FF" vlink="#840084" alink="#0000FF"><div class="book" title="BIND 10 Messages Manual"><div class="titlepage"><div><div><h1 class="title"><a name="id1168230298903"></a>BIND 10 Messages Manual</h1></div><div><p class="releaseinfo">This is the messages manual for BIND 10 version
+ 20110519.</p></div><div><p class="copyright">Copyright © 2011 Internet Systems Consortium, Inc.</p></div><div><div class="abstract" title="Abstract"><p class="title"><b>Abstract</b></p><p>BIND 10 is a Domain Name System (DNS) suite managed by
+ Internet Systems Consortium (ISC). It includes DNS libraries
+ and modular components for controlling authoritative and
+ recursive DNS servers.
+ </p><p>
+ This is the messages manual for BIND 10 version 20110519.
+ The most up-to-date version of this document, along with
+ other documents for BIND 10, can be found at
+ <a class="ulink" href="http://bind10.isc.org/docs" target="_top">http://bind10.isc.org/docs</a>.
+ </p></div></div></div><hr></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="chapter"><a href="#intro">1. Introduction</a></span></dt><dt><span class="chapter"><a href="#messages">2. BIND 10 Messages</a></span></dt></dl></div><div class="chapter" title="Chapter 1. Introduction"><div class="titlepage"><div><div><h2 class="title"><a name="intro"></a>Chapter 1. Introduction</h2></div></div></div><p>
+ This document lists each message that can be logged by the
+ programs in the BIND 10 package. Each entry in this manual
+ is of the form:
+ </p><pre class="screen">IDENTIFICATION message-text</pre><p>
+ ... where "IDENTIFICATION" is the message identification included
+ in each message logged and "message-text" is the accompanying
+ message text. The "message-text" may include placeholders of the
+ form "%1", "%2" etc.; these parameters are replaced by relevant
+ values when the message is logged.
+ </p><p>
+ Each entry is also accompanied by a description giving more
+ information about the circumstances that result in the message
+ being logged.
+ </p><p>
+ For information on configuring and using BIND 10 logging,
+ refer to the <a class="ulink" href="bind10-guide.html" target="_top">BIND 10 Guide</a>.
+ </p></div><div class="chapter" title="Chapter 2. BIND 10 Messages"><div class="titlepage"><div><div><h2 class="title"><a name="messages"></a>Chapter 2. BIND 10 Messages</h2></div></div></div><p>
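The introduction above says the message text may carry placeholders such as %1 and %2 that are replaced with run-time values when the message is logged. A small sketch of that substitution, using the ASIODNS_OPENSOCK text listed below (the helper is invented for this example and is not BIND 10's logging code):

    # Illustration of %1/%2-style placeholder substitution in message text;
    # this helper is invented for the example and is not BIND 10's logger.
    import re

    def format_message(text, *args):
        """Replace %1, %2, ... in 'text' with the corresponding positional args."""
        def substitute(match):
            index = int(match.group(1)) - 1          # %1 refers to args[0]
            return str(args[index]) if 0 <= index < len(args) else match.group(0)
        return re.sub(r"%(\d+)", substitute, text)

    template = "ASIODNS_OPENSOCK error %1 opening %2 socket to %3(%4)"
    print(format_message(template, 111, "TCP", "127.0.0.1", 53))
    # ASIODNS_OPENSOCK error 111 opening TCP socket to 127.0.0.1(53)
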
+ </p><div class="variablelist"><dl><dt><a name="ASIODNS_FETCHCOMP"></a><span class="term">ASIODNS_FETCHCOMP upstream fetch to %1(%2) has now completed</span></dt><dd><p>
+A debug message; this records that the upstream fetch (a query made by the
+resolver on behalf of its client) to the specified address has completed.
+</p></dd><dt><a name="ASIODNS_FETCHSTOP"></a><span class="term">ASIODNS_FETCHSTOP upstream fetch to %1(%2) has been stopped</span></dt><dd><p>
+An external component has requested the halting of an upstream fetch. This
+is an allowed operation, and the message should only appear if debug is
+enabled.
+</p></dd><dt><a name="ASIODNS_OPENSOCK"></a><span class="term">ASIODNS_OPENSOCK error %1 opening %2 socket to %3(%4)</span></dt><dd><p>
+The asynchronous I/O code encountered an error when trying to open a socket
+of the specified protocol in order to send a message to the target address.
+The number of the system error that caused the problem is given in the
+message.
+</p></dd><dt><a name="ASIODNS_RECVSOCK"></a><span class="term">ASIODNS_RECVSOCK error %1 reading %2 data from %3(%4)</span></dt><dd><p>
+The asynchronous I/O code encountered an error when trying to read data from
+the specified address on the given protocol. The number of the system
+error that caused the problem is given in the message.
+</p></dd><dt><a name="ASIODNS_RECVTMO"></a><span class="term">ASIODNS_RECVTMO receive timeout while waiting for data from %1(%2)</span></dt><dd><p>
+An upstream fetch from the specified address timed out. This may happen for
+any number of reasons and is most probably a problem at the remote server
+or a problem on the network. The message will only appear if debug is
+enabled.
+</p></dd><dt><a name="ASIODNS_SENDSOCK"></a><span class="term">ASIODNS_SENDSOCK error %1 sending data using %2 to %3(%4)</span></dt><dd><p>
+The asynchronous I/O code encountered an error when trying to send data
+to the specified address on the given protocol. The number of the
+system error that caused the problem is given in the message.
+</p></dd><dt><a name="ASIODNS_UNKORIGIN"></a><span class="term">ASIODNS_UNKORIGIN unknown origin for ASIO error code %1 (protocol: %2, address %3)</span></dt><dd><p>
+This message should not appear; if it does, it indicates an internal
+error. Please submit a bug report.
+</p></dd><dt><a name="ASIODNS_UNKRESULT"></a><span class="term">ASIODNS_UNKRESULT unknown result (%1) when IOFetch::stop() was executed for I/O to %2(%3)</span></dt><dd><p>
+The termination method of the resolver's upstream fetch class was called with
+an unknown result code (which is given in the message). This message should
+not appear and may indicate an internal error. Please submit a bug report.
+</p></dd><dt><a name="CONFIG_CCSESSION_MSG"></a><span class="term">CONFIG_CCSESSION_MSG error in CC session message: %1</span></dt><dd><p>
+There was a problem with an incoming message on the command and control
+channel. The message does not appear to be a valid command, and is
+missing a required element or contains an unknown data format. This
+most likely means that another BIND10 module is sending a bad message.
+The message itself is ignored by this module.
+</p></dd><dt><a name="CONFIG_CCSESSION_MSG_INTERNAL"></a><span class="term">CONFIG_CCSESSION_MSG_INTERNAL error handling CC session message: %1</span></dt><dd><p>
+There was an internal problem handling an incoming message on the
+command and control channel. An unexpected exception was thrown. This
+most likely points to an internal inconsistency in the module code. The
+exception message is appended to the log error, and the module will
+continue to run, but will not send back an answer.
+</p></dd><dt><a name="CONFIG_FOPEN_ERR"></a><span class="term">CONFIG_FOPEN_ERR error opening %1: %2</span></dt><dd><p>
+There was an error opening the given file.
+</p></dd><dt><a name="CONFIG_JSON_PARSE"></a><span class="term">CONFIG_JSON_PARSE JSON parse error in %1: %2</span></dt><dd><p>
+There was a parse error in the JSON file. The given file does not appear
+to be in valid JSON format. Please verify that the filename is correct
+and that the contents are valid JSON.
+</p></dd><dt><a name="CONFIG_MANAGER_CONFIG"></a><span class="term">CONFIG_MANAGER_CONFIG error getting configuration from cfgmgr: %1</span></dt><dd><p>
+The configuration manager returned an error when this module requested
+the configuration. The full error message answer from the configuration
+manager is appended to the log error. The most likely cause is that
+the module is of a different (command specification) version than the
+running configuration manager.
+</p></dd><dt><a name="CONFIG_MANAGER_MOD_SPEC"></a><span class="term">CONFIG_MANAGER_MOD_SPEC module specification not accepted by cfgmgr: %1</span></dt><dd><p>
+The module specification file for this module was rejected by the
+configuration manager. The full error message answer from the
+configuration manager is appended to the log error. The most likely
+cause is that the module is of a different (specification file) version
+than the running configuration manager.
+</p></dd><dt><a name="CONFIG_MODULE_SPEC"></a><span class="term">CONFIG_MODULE_SPEC module specification error in %1: %2</span></dt><dd><p>
+The given file does not appear to be a valid specification file. Please
+verify that the filename is correct and that its contents are a valid
+BIND10 module specification.
+</p></dd><dt><a name="DATASRC_CACHE_CREATE"></a><span class="term">DATASRC_CACHE_CREATE creating the hotspot cache</span></dt><dd><p>
+Debug information that the hotspot cache was created at startup.
+</p></dd><dt><a name="DATASRC_CACHE_DESTROY"></a><span class="term">DATASRC_CACHE_DESTROY destroying the hotspot cache</span></dt><dd><p>
+Debug information. The hotspot cache is being destroyed.
+</p></dd><dt><a name="DATASRC_CACHE_DISABLE"></a><span class="term">DATASRC_CACHE_DISABLE disabling the cache</span></dt><dd><p>
+The hotspot cache is disabled from now on. It is not going to store
+information or return anything.
+</p></dd><dt><a name="DATASRC_CACHE_ENABLE"></a><span class="term">DATASRC_CACHE_ENABLE enabling the cache</span></dt><dd><p>
+The hotspot cache is enabled from now on.
+</p></dd><dt><a name="DATASRC_CACHE_EXPIRED"></a><span class="term">DATASRC_CACHE_EXPIRED the item '%1' is expired</span></dt><dd><p>
+Debug information. An item was looked up in the hotspot cache and was
+actually there, but it was too old, so it was removed instead and nothing
+is reported (the external behaviour is the same as with CACHE_NOT_FOUND).
+</p></dd><dt><a name="DATASRC_CACHE_FOUND"></a><span class="term">DATASRC_CACHE_FOUND the item '%1' was found</span></dt><dd><p>
+Debug information. An item was successfully looked up in the hotspot cache.
+</p></dd><dt><a name="DATASRC_CACHE_FULL"></a><span class="term">DATASRC_CACHE_FULL cache is full, dropping oldest</span></dt><dd><p>
+Debug information. After inserting an item into the hotspot cache, the
+maximum number of items was exceeded, so the least recently used item will
+be dropped. This should be directly followed by CACHE_REMOVE.
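+</p><p>
+The following is a minimal, hypothetical C++ sketch of the least-recently-used
+idea described above; it is an illustration only, not the hotspot cache's
+actual implementation, and the name LruCache is invented for the example.
+</p><pre class="screen">
+#include <cstddef>
+#include <list>
+#include <string>
+#include <unordered_map>
+
+// Illustrative LRU container: the front of 'order_' is the most recently
+// used key, the back is the least recently used one.
+class LruCache {
+public:
+    explicit LruCache(std::size_t max_items) : max_items_(max_items) {}
+
+    void insert(const std::string& key, const std::string& value) {
+        touch(key);                      // move (or place) the key at the front
+        items_[key] = value;
+        if (max_items_ != 0 && order_.size() > max_items_) {
+            // The equivalent of CACHE_FULL followed by CACHE_REMOVE:
+            items_.erase(order_.back());
+            order_.pop_back();
+        }
+    }
+
+private:
+    void touch(const std::string& key) {
+        order_.remove(key);              // linear scan; fine for a sketch
+        order_.push_front(key);
+    }
+
+    std::size_t max_items_;              // 0 means no limit
+    std::list<std::string> order_;
+    std::unordered_map<std::string, std::string> items_;
+};
+</pre><p>
+A limit of 0 disables eviction, matching the behaviour described for
+DATASRC_CACHE_SLOTS below.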
+</p></dd><dt><a name="DATASRC_CACHE_INSERT"></a><span class="term">DATASRC_CACHE_INSERT inserting item '%1' into the cache</span></dt><dd><p>
+Debug information. A new item is being inserted into the hotspot
+cache.
+</p></dd><dt><a name="DATASRC_CACHE_NOT_FOUND"></a><span class="term">DATASRC_CACHE_NOT_FOUND the item '%1' was not found</span></dt><dd><p>
+Debug information. An attempt was made to look up an item in the hotspot
+cache, but it is not there.
+</p></dd><dt><a name="DATASRC_CACHE_OLD_FOUND"></a><span class="term">DATASRC_CACHE_OLD_FOUND older instance of cache item found, replacing</span></dt><dd><p>
+Debug information. While inserting an item into the hotspot cache, an older
+instance of an item with the same name was found. The old instance will be
+removed. This should be directly followed by CACHE_REMOVE.
+</p></dd><dt><a name="DATASRC_CACHE_REMOVE"></a><span class="term">DATASRC_CACHE_REMOVE removing '%1' from the cache</span></dt><dd><p>
+Debug information. An item is being removed from the hotspot cache.
+</p></dd><dt><a name="DATASRC_CACHE_SLOTS"></a><span class="term">DATASRC_CACHE_SLOTS setting the cache size to '%1', dropping '%2' items</span></dt><dd><p>
+The maximum allowed number of items in the hotspot cache is set to the given
+number. If there are too many, some of them will be dropped. A size of 0
+means no limit.
+</p></dd><dt><a name="DATASRC_DO_QUERY"></a><span class="term">DATASRC_DO_QUERY handling query for '%1/%2'</span></dt><dd><p>
+Debug information. We're processing an internal query for the given name
+and type.
+</p></dd><dt><a name="DATASRC_MEM_ADD_RRSET"></a><span class="term">DATASRC_MEM_ADD_RRSET adding RRset '%1/%2' into zone '%3'</span></dt><dd><p>
+Debug information. An RRset is being added to the in-memory data source.
+</p></dd><dt><a name="DATASRC_MEM_ADD_WILDCARD"></a><span class="term">DATASRC_MEM_ADD_WILDCARD adding wildcards for '%1'</span></dt><dd><p>
+Debug information. Special marks are needed above each "*" in a wildcard
+name; they are being added now for this name.
+</p></dd><dt><a name="DATASRC_MEM_ADD_ZONE"></a><span class="term">DATASRC_MEM_ADD_ZONE adding zone '%1/%2'</span></dt><dd><p>
+Debug information. A zone is being added into the in-memory data source.
+</p></dd><dt><a name="DATASRC_MEM_ANY_SUCCESS"></a><span class="term">DATASRC_MEM_ANY_SUCCESS ANY query for '%1' successful</span></dt><dd><p>
+Debug information. The domain was found and an ANY type query is being answered
+by providing everything found inside the domain.
+</p></dd><dt><a name="DATASRC_MEM_CNAME"></a><span class="term">DATASRC_MEM_CNAME CNAME at the domain '%1'</span></dt><dd><p>
+Debug information. The requested domain is an alias to a different domain,
+returning the CNAME instead.
+</p></dd><dt><a name="DATASRC_MEM_CNAME_COEXIST"></a><span class="term">DATASRC_MEM_CNAME_COEXIST can't add data to CNAME in domain '%1'</span></dt><dd><p>
+This is the same problem as in MEM_CNAME_TO_NONEMPTY, but it happened the
+other way around -- adding some other data to a CNAME.
+</p></dd><dt><a name="DATASRC_MEM_CNAME_TO_NONEMPTY"></a><span class="term">DATASRC_MEM_CNAME_TO_NONEMPTY can't add CNAME to domain with other data in '%1'</span></dt><dd><p>
+Someone or something tried to add a CNAME into a domain that already contains
+some other data. But the protocol forbids coexistence of CNAME with anything
+(RFC 1034, section 3.6.2). This indicates a problem with provided data.
+</p></dd><dt><a name="DATASRC_MEM_CREATE"></a><span class="term">DATASRC_MEM_CREATE creating zone '%1' in '%2' class</span></dt><dd><p>
+Debug information. A representation of a zone for the in-memory data source is
+being created.
+</p></dd><dt><a name="DATASRC_MEM_DELEG_FOUND"></a><span class="term">DATASRC_MEM_DELEG_FOUND delegation found at '%1'</span></dt><dd><p>
+Debug information. A delegation point was found above the requested record.
+</p></dd><dt><a name="DATASRC_MEM_DESTROY"></a><span class="term">DATASRC_MEM_DESTROY destroying zone '%1' in '%2' class</span></dt><dd><p>
+Debug information. A zone from the in-memory data source is being destroyed.
+</p></dd><dt><a name="DATASRC_MEM_DNAME_ENCOUNTERED"></a><span class="term">DATASRC_MEM_DNAME_ENCOUNTERED encountered a DNAME</span></dt><dd><p>
+Debug information. While searching for the requested domain, a DNAME was
+encountered on the way. This may lead to redirection to a different domain and
+stop the search.
+</p></dd><dt><a name="DATASRC_MEM_DNAME_FOUND"></a><span class="term">DATASRC_MEM_DNAME_FOUND DNAME found at '%1'</span></dt><dd><p>
+Debug information. A DNAME was found instead of the requested information.
+</p></dd><dt><a name="DATASRC_MEM_DNAME_NS"></a><span class="term">DATASRC_MEM_DNAME_NS DNAME and NS can't coexist in non-apex domain '%1'</span></dt><dd><p>
+A request was made to put DNAME and NS records into the same domain
+which is not the apex (the top of the zone). This is forbidden by RFC
+2672, section 3. This indicates a problem with the provided data.
+</p></dd><dt><a name="DATASRC_MEM_DOMAIN_EMPTY"></a><span class="term">DATASRC_MEM_DOMAIN_EMPTY requested domain '%1' is empty</span></dt><dd><p>
+Debug information. The requested domain exists in the tree of domains, but
+it is empty. Therefore it doesn't contain the requested resource type.
+</p></dd><dt><a name="DATASRC_MEM_DUP_RRSET"></a><span class="term">DATASRC_MEM_DUP_RRSET duplicate RRset '%1/%2'</span></dt><dd><p>
+An RRset is being inserted into the in-memory data source for a second time. The
+original version must be removed first. Note that loading master files where an
+RRset is split into multiple locations is not supported yet.
+</p></dd><dt><a name="DATASRC_MEM_EXACT_DELEGATION"></a><span class="term">DATASRC_MEM_EXACT_DELEGATION delegation at the exact domain '%1'</span></dt><dd><p>
+Debug information. There's a NS record at the requested domain. This means
+this zone is not authoritative for the requested domain, but a delegation
+should be followed. The requested domain is an apex of some zone.
+</p></dd><dt><a name="DATASRC_MEM_FIND"></a><span class="term">DATASRC_MEM_FIND find '%1/%2'</span></dt><dd><p>
+Debug information. A search for the requested RRset is being started.
+</p></dd><dt><a name="DATASRC_MEM_FIND_ZONE"></a><span class="term">DATASRC_MEM_FIND_ZONE looking for zone '%1'</span></dt><dd><p>
+Debug information. A zone object for this zone is being searched for in the
+in-memory data source.
+</p></dd><dt><a name="DATASRC_MEM_LOAD"></a><span class="term">DATASRC_MEM_LOAD loading zone '%1' from file '%2'</span></dt><dd><p>
+Debug information. The content of the master file is being loaded into memory.
+</p></dd><dt><a name="DATASRC_MEM_NOTFOUND"></a><span class="term">DATASRC_MEM_NOTFOUND requested domain '%1' not found</span></dt><dd><p>
+Debug information. The requested domain does not exist.
+</p></dd><dt><a name="DATASRC_MEM_NS_ENCOUNTERED"></a><span class="term">DATASRC_MEM_NS_ENCOUNTERED encountered a NS</span></dt><dd><p>
+Debug information. While searching for the requested domain, an NS record was
+encountered on the way (a delegation). This may cause the search to stop.
+</p></dd><dt><a name="DATASRC_MEM_NXRRSET"></a><span class="term">DATASRC_MEM_NXRRSET no such type '%1' at '%2'</span></dt><dd><p>
+Debug information. The domain exists, but it doesn't hold any record of the
+requested type.
+</p></dd><dt><a name="DATASRC_MEM_OUT_OF_ZONE"></a><span class="term">DATASRC_MEM_OUT_OF_ZONE domain '%1' doesn't belong to zone '%2'</span></dt><dd><p>
+An attempt was made to add the domain into a zone that shouldn't contain it
+(e.g. the domain is not a subdomain of the zone origin). This indicates a
+problem with the provided data.
+</p></dd><dt><a name="DATASRC_MEM_RENAME"></a><span class="term">DATASRC_MEM_RENAME renaming RRset from '%1' to '%2'</span></dt><dd><p>
+Debug information. An RRset is being generated from a different RRset (most
+probably a wildcard), so it must be renamed to whatever the user asked for. In
+fact, it's impossible to rename RRsets with our libraries, so a new one is
+created and all resource records are copied over.
+</p></dd><dt><a name="DATASRC_MEM_SINGLETON"></a><span class="term">DATASRC_MEM_SINGLETON trying to add multiple RRs for domain '%1' and type '%2'</span></dt><dd><p>
+Some resource types are singletons -- only one is allowed in a domain
+(for example CNAME or SOA). This indicates a problem with provided data.
+</p></dd><dt><a name="DATASRC_MEM_SUCCESS"></a><span class="term">DATASRC_MEM_SUCCESS query for '%1/%2' successful</span></dt><dd><p>
+Debug information. The requested record was found.
+</p></dd><dt><a name="DATASRC_MEM_SUPER_STOP"></a><span class="term">DATASRC_MEM_SUPER_STOP stopped at superdomain '%1', domain '%2' is empty</span></dt><dd><p>
+Debug information. The search stopped at a superdomain of the requested
+domain. The domain is an empty nonterminal, therefore it is treated as an
+NXRRSET case (e.g. the domain exists, but it doesn't have the requested
+record type).
+</p></dd><dt><a name="DATASRC_MEM_SWAP"></a><span class="term">DATASRC_MEM_SWAP swapping contents of two zone representations ('%1' and '%2')</span></dt><dd><p>
+Debug information. The contents of two in-memory zones are being exchanged.
+This is a usual practice for performing a manipulation in an exception-safe
+manner -- the new data are prepared in a different zone object and, once that
+succeeds, the contents are swapped. The original object then holds the new
+data and the other object can be safely destroyed.
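+</p><p>
+A minimal, hypothetical C++ sketch of this prepare-then-swap pattern follows;
+it is an illustration of the idea only, not the data source's actual code, and
+the names used are invented.
+</p><pre class="screen">
+#include <string>
+#include <utility>
+#include <vector>
+
+// Stand-in for a zone's contents; the real type is unrelated.
+struct ZoneData {
+    std::vector<std::string> rrsets;
+};
+
+void reloadZone(ZoneData& live_zone) {
+    ZoneData fresh;                  // build the new contents separately
+    // ... load and validate data into 'fresh'; an exception thrown here
+    // leaves 'live_zone' completely untouched ...
+    std::swap(live_zone, fresh);     // cheap exchange once loading succeeded
+}                                    // 'fresh' now holds the old data and is destroyed
+</pre><p>
+Because the swap itself does not fail, the live zone either keeps its old
+contents or atomically gains the new ones.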
+</p></dd><dt><a name="DATASRC_MEM_WILDCARD_CANCEL"></a><span class="term">DATASRC_MEM_WILDCARD_CANCEL wildcard match canceled for '%1'</span></dt><dd><p>
+Debug information. A domain above a wildcard was reached, but there's something
+below the requested domain. Therefore the wildcard doesn't apply here. This
+behaviour is specified by RFC 1034, section 4.3.3.
+</p></dd><dt><a name="DATASRC_MEM_WILDCARD_DNAME"></a><span class="term">DATASRC_MEM_WILDCARD_DNAME DNAME record in wildcard domain '%1'</span></dt><dd><p>
+The software refuses to load DNAME records into a wildcard domain. It isn't
+explicitly forbidden, but the protocol is ambiguous about how this should
+behave and BIND 9 refuses that as well. Please describe your intention using
+different tools.
+</p></dd><dt><a name="DATASRC_MEM_WILDCARD_NS"></a><span class="term">DATASRC_MEM_WILDCARD_NS NS record in wildcard domain '%1'</span></dt><dd><p>
+The software refuses to load NS records into a wildcard domain. It isn't
+explicitly forbidden, but the protocol is ambiguous about how this should
+behave and BIND 9 refuses that as well. Please describe your intention using
+different tools.
+</p></dd><dt><a name="DATASRC_META_ADD"></a><span class="term">DATASRC_META_ADD adding a data source into meta data source</span></dt><dd><p>
+Debug information. Yet another data source is being added into the meta data
+source (probably at startup or during reconfiguration).
+</p></dd><dt><a name="DATASRC_META_ADD_CLASS_MISMATCH"></a><span class="term">DATASRC_META_ADD_CLASS_MISMATCH mismatch between classes '%1' and '%2'</span></dt><dd><p>
+An attempt was made to add a data source into a meta data source, but their
+classes do not match.
+</p></dd><dt><a name="DATASRC_META_REMOVE"></a><span class="term">DATASRC_META_REMOVE removing data source from meta data source</span></dt><dd><p>
+Debug information. A data source is being removed from meta data source.
+</p></dd><dt><a name="DATASRC_QUERY_ADD_NSEC"></a><span class="term">DATASRC_QUERY_ADD_NSEC adding NSEC record for '%1'</span></dt><dd><p>
+Debug information. A NSEC record covering this zone is being added.
+</p></dd><dt><a name="DATASRC_QUERY_ADD_NSEC3"></a><span class="term">DATASRC_QUERY_ADD_NSEC3 adding NSEC3 record of zone '%1'</span></dt><dd><p>
+Debug information. A NSEC3 record for the given zone is being added to the
+response message.
+</p></dd><dt><a name="DATASRC_QUERY_ADD_RRSET"></a><span class="term">DATASRC_QUERY_ADD_RRSET adding RRset '%1/%2' to message</span></dt><dd><p>
+Debug information. An RRset is being added to the response message.
+</p></dd><dt><a name="DATASRC_QUERY_ADD_SOA"></a><span class="term">DATASRC_QUERY_ADD_SOA adding SOA of '%1'</span></dt><dd><p>
+Debug information. A SOA record of the given zone is being added to the
+authority section of the response message.
+</p></dd><dt><a name="DATASRC_QUERY_AUTH_FAIL"></a><span class="term">DATASRC_QUERY_AUTH_FAIL the underlying data source failed with %1</span></dt><dd><p>
+The underlying data source failed to answer the authoritative query. 1 means
+some error, 2 is not implemented. The data source should have logged the
+specific error already.
+</p></dd><dt><a name="DATASRC_QUERY_BAD_REFERRAL"></a><span class="term">DATASRC_QUERY_BAD_REFERRAL bad referral to '%1'</span></dt><dd><p>
+The domain lives in another zone. But it is not possible to generate referral
+information for it.
+</p></dd><dt><a name="DATASRC_QUERY_CACHED"></a><span class="term">DATASRC_QUERY_CACHED data for %1/%2 found in cache</span></dt><dd><p>
+Debug information. The requested data were found in the hotspot cache, so
+no query is sent to the real data source.
+</p></dd><dt><a name="DATASRC_QUERY_CHECK_CACHE"></a><span class="term">DATASRC_QUERY_CHECK_CACHE checking cache for '%1/%2'</span></dt><dd><p>
+Debug information. While processing a query, a lookup in the hotspot cache
+is being made.
+</p></dd><dt><a name="DATASRC_QUERY_COPY_AUTH"></a><span class="term">DATASRC_QUERY_COPY_AUTH copying authoritative section into message</span></dt><dd><p>
+Debug information. The whole referral information is being copied into the
+response message.
+</p></dd><dt><a name="DATASRC_QUERY_DELEGATION"></a><span class="term">DATASRC_QUERY_DELEGATION looking for delegation on the path to '%1'</span></dt><dd><p>
+Debug information. The software is trying to identify delegation points on the
+way down to the given domain.
+</p></dd><dt><a name="DATASRC_QUERY_EMPTY_CNAME"></a><span class="term">DATASRC_QUERY_EMPTY_CNAME CNAME at '%1' is empty</span></dt><dd><p>
+A CNAME was being followed, but it contains no records, so there's nowhere
+to go and there will be no answer. This indicates a problem with the
+supplied data.
+</p></dd><dt><a name="DATASRC_QUERY_EMPTY_DNAME"></a><span class="term">DATASRC_QUERY_EMPTY_DNAME the DNAME on '%1' is empty</span></dt><dd><p>
+During an attempt to synthesize a CNAME from this DNAME, it was discovered
+that the DNAME is empty (it has no records). This indicates a problem with
+the supplied data.
+</p></dd><dt><a name="DATASRC_QUERY_FAIL"></a><span class="term">DATASRC_QUERY_FAIL query failed</span></dt><dd><p>
+Some subtask of query processing failed. The reason should have been reported
+already. We are returning SERVFAIL.
+</p></dd><dt><a name="DATASRC_QUERY_FOLLOW_CNAME"></a><span class="term">DATASRC_QUERY_FOLLOW_CNAME following CNAME at '%1'</span></dt><dd><p>
+Debug information. The domain is a CNAME (or a DNAME and we created a CNAME
+for it already), so it's being followed.
+</p></dd><dt><a name="DATASRC_QUERY_GET_MX_ADDITIONAL"></a><span class="term">DATASRC_QUERY_GET_MX_ADDITIONAL addition of A/AAAA for '%1' requested by MX '%2'</span></dt><dd><p>
+Debug information. While processing a query, an MX record was encountered. It
+references the mentioned name, so A/AAAA records for it are looked up
+and put into the additional section.
+</p></dd><dt><a name="DATASRC_QUERY_GET_NS_ADDITIONAL"></a><span class="term">DATASRC_QUERY_GET_NS_ADDITIONAL addition of A/AAAA for '%1' requested by NS '%2'</span></dt><dd><p>
+Debug information. While processing a query, an NS record was encountered. It
+references the mentioned name, so A/AAAA records for it are looked up
+and put into the additional section.
+</p></dd><dt><a name="DATASRC_QUERY_GLUE_FAIL"></a><span class="term">DATASRC_QUERY_GLUE_FAIL the underlying data source failed with %1</span></dt><dd><p>
+The underlying data source failed to answer the glue query. 1 means some error,
+2 is not implemented. The data source should have logged the specific error
+already.
+</p></dd><dt><a name="DATASRC_QUERY_INVALID_OP"></a><span class="term">DATASRC_QUERY_INVALID_OP invalid query operation requested</span></dt><dd><p>
+This indicates a programmer error. The DO_QUERY was called with an unknown
+operation code.
+</p></dd><dt><a name="DATASRC_QUERY_IS_AUTH"></a><span class="term">DATASRC_QUERY_IS_AUTH auth query (%1/%2)</span></dt><dd><p>
+Debug information. The last DO_QUERY is an auth query.
+</p></dd><dt><a name="DATASRC_QUERY_IS_GLUE"></a><span class="term">DATASRC_QUERY_IS_GLUE glue query (%1/%2)</span></dt><dd><p>
+Debug information. The last DO_QUERY is a query for glue addresses.
+</p></dd><dt><a name="DATASRC_QUERY_IS_NOGLUE"></a><span class="term">DATASRC_QUERY_IS_NOGLUE query for non-glue addresses (%1/%2)</span></dt><dd><p>
+Debug information. The last DO_QUERY is a query for addresses that are not
+glue.
+</p></dd><dt><a name="DATASRC_QUERY_IS_REF"></a><span class="term">DATASRC_QUERY_IS_REF query for referral (%1/%2)</span></dt><dd><p>
+Debug information. The last DO_QUERY is a query for referral information.
+</p></dd><dt><a name="DATASRC_QUERY_IS_SIMPLE"></a><span class="term">DATASRC_QUERY_IS_SIMPLE simple query (%1/%2)</span></dt><dd><p>
+Debug information. The last DO_QUERY is a simple query.
+</p></dd><dt><a name="DATASRC_QUERY_MISPLACED_TASK"></a><span class="term">DATASRC_QUERY_MISPLACED_TASK task of this type should not be here</span></dt><dd><p>
+This indicates a programming error. A task was found in the internal task
+queue, but this kind of task wasn't designed to be inside the queue (it should
+be handled right away, not queued).
+</p></dd><dt><a name="DATASRC_QUERY_MISSING_NS"></a><span class="term">DATASRC_QUERY_MISSING_NS missing NS records for '%1'</span></dt><dd><p>
+NS records should have been put into the authority section. However, this zone
+has none. This indicates a problem with the provided data.
+</p></dd><dt><a name="DATASRC_QUERY_MISSING_SOA"></a><span class="term">DATASRC_QUERY_MISSING_SOA the zone '%1' has no SOA</span></dt><dd><p>
+The answer should have been a negative one (e.g. stating the nonexistence of
+something). To do so, a SOA record should be put into the authority section,
+but the zone does not have one. This indicates a problem with the provided
+data.
+</p></dd><dt><a name="DATASRC_QUERY_NOGLUE_FAIL"></a><span class="term">DATASRC_QUERY_NOGLUE_FAIL the underlying data source failed with %1</span></dt><dd><p>
+The underlying data source failed to answer the no-glue query. 1 means some
+error, 2 is not implemented. The data source should have logged the specific
+error already.
+</p></dd><dt><a name="DATASRC_QUERY_NO_CACHE_ANY_AUTH"></a><span class="term">DATASRC_QUERY_NO_CACHE_ANY_AUTH ignoring cache for ANY query (%1/%2 in %3 class)</span></dt><dd><p>
+Debug information. The hotspot cache is ignored for authoritative ANY queries
+for consistency reasons.
+</p></dd><dt><a name="DATASRC_QUERY_NO_CACHE_ANY_SIMPLE"></a><span class="term">DATASRC_QUERY_NO_CACHE_ANY_SIMPLE ignoring cache for ANY query (%1/%2 in %3 class)</span></dt><dd><p>
+Debug information. The hotspot cache is ignored for ANY queries for consistency
+reasons.
+</p></dd><dt><a name="DATASRC_QUERY_NO_DS_NSEC"></a><span class="term">DATASRC_QUERY_NO_DS_NSEC there's no DS record in the '%1' zone</span></dt><dd><p>
+An attempt to add an NSEC record into the message failed because the zone does
+not have any DS record. This indicates a problem with the provided data.
+</p></dd><dt><a name="DATASRC_QUERY_NO_DS_NSEC3"></a><span class="term">DATASRC_QUERY_NO_DS_NSEC3 there's no DS record in the '%1' zone</span></dt><dd><p>
+An attempt to add an NSEC3 record into the message failed because the zone does
+not have any DS record. This indicates a problem with the provided data.
+</p></dd><dt><a name="DATASRC_QUERY_NO_ZONE"></a><span class="term">DATASRC_QUERY_NO_ZONE no zone containing '%1' in class '%2'</span></dt><dd><p>
+Lookup of the domain failed because the data source has no zone that contains
+the domain. Maybe someone sent a query to the wrong server for some reason.
+</p></dd><dt><a name="DATASRC_QUERY_PROCESS"></a><span class="term">DATASRC_QUERY_PROCESS processing query '%1/%2' in the '%3' class</span></dt><dd><p>
+Debug information. A query is now being processed.
+</p></dd><dt><a name="DATASRC_QUERY_PROVENX_FAIL"></a><span class="term">DATASRC_QUERY_PROVENX_FAIL unable to prove nonexistence of '%1'</span></dt><dd><p>
+The user wants DNSSEC and we discovered that the entity doesn't exist (either
+the domain or the record), but there was an error getting the NSEC/NSEC3
+record to prove the nonexistence.
+</p></dd><dt><a name="DATASRC_QUERY_REF_FAIL"></a><span class="term">DATASRC_QUERY_REF_FAIL the underlying data source failed with %1</span></dt><dd><p>
+The underlying data source failed to answer the query for referral information.
+1 means some error, 2 is not implemented. The data source should have logged
+the specific error already.
+</p></dd><dt><a name="DATASRC_QUERY_RRSIG"></a><span class="term">DATASRC_QUERY_RRSIG unable to answer RRSIG query</span></dt><dd><p>
+The server is unable to answer a direct query for RRSIG type, but was asked
+to do so.
+</p></dd><dt><a name="DATASRC_QUERY_SIMPLE_FAIL"></a><span class="term">DATASRC_QUERY_SIMPLE_FAIL the underlying data source failed with %1</span></dt><dd><p>
+The underlying data source failed to answer the simple query. 1 means some
+error, 2 is not implemented. The data source should have logged the specific
+error already.
+</p></dd><dt><a name="DATASRC_QUERY_SYNTH_CNAME"></a><span class="term">DATASRC_QUERY_SYNTH_CNAME synthesizing CNAME from DNAME on '%1'</span></dt><dd><p>
+Debug information. While answering a query, a DNAME was encountered. The DNAME
+itself will be returned, and along with it a CNAME will be synthesized for
+clients that don't understand DNAMEs.
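+</p><p>
+As a purely illustrative example (the names below are invented), a zone
+containing the record
+</p><pre class="screen">dept.example.com.  DNAME  dept.example.net.</pre><p>
+would answer a query for www.dept.example.com by returning that DNAME together
+with a synthesized CNAME along the lines of "www.dept.example.com. CNAME
+www.dept.example.net.", pointing the client at the corresponding name under
+the DNAME target.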
+</p></dd><dt><a name="DATASRC_QUERY_TASK_FAIL"></a><span class="term">DATASRC_QUERY_TASK_FAIL task failed with %1</span></dt><dd><p>
+The query subtask failed. The reason should have been reported by the subtask
+already. The code is 1 for error, 2 for not implemented.
+</p></dd><dt><a name="DATASRC_QUERY_TOO_MANY_CNAMES"></a><span class="term">DATASRC_QUERY_TOO_MANY_CNAMES CNAME chain limit exceeded at '%1'</span></dt><dd><p>
+A CNAME led to another CNAME and that led to another, and so on. After 16
+CNAMEs, the software gave up. Long CNAME chains are discouraged, and this
+might possibly be a loop as well. Note that some of the CNAMEs might have
+been synthesized from DNAMEs. This indicates a problem with the supplied data.
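+</p><p>
+A hypothetical C++ sketch of such a guard is shown below; the limit of 16 is
+taken from this description, but the code is illustrative only and is not the
+actual implementation.
+</p><pre class="screen">
+#include <map>
+#include <string>
+
+// Illustrative only: 'cnames' maps an alias name to its CNAME target.
+bool chainResolves(const std::map<std::string, std::string>& cnames,
+                   std::string name) {
+    const int kCnameChainLimit = 16;     // limit quoted in the text above
+    for (int hops = 0; hops < kCnameChainLimit; ++hops) {
+        const auto it = cnames.find(name);
+        if (it == cnames.end()) {
+            return true;                 // no further CNAME: the chain ends here
+        }
+        name = it->second;               // follow the alias
+    }
+    return false;                        // too long (or a loop): give up
+}
+</pre><p>
+Whether the chain is genuinely long or forms a loop, the fixed limit ensures
+that query processing terminates.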
+</p></dd><dt><a name="DATASRC_QUERY_UNKNOWN_RESULT"></a><span class="term">DATASRC_QUERY_UNKNOWN_RESULT unknown result of subtask</span></dt><dd><p>
+This indicates a programmer error. The answer from the subtask doesn't look
+like anything known.
+</p></dd><dt><a name="DATASRC_QUERY_WILDCARD"></a><span class="term">DATASRC_QUERY_WILDCARD looking for a wildcard covering '%1'</span></dt><dd><p>
+Debug information. A direct match wasn't found, so a wildcard covering the
+domain is being looked for now.
+</p></dd><dt><a name="DATASRC_QUERY_WILDCARD_FAIL"></a><span class="term">DATASRC_QUERY_WILDCARD_FAIL error processing wildcard for '%1'</span></dt><dd><p>
+During an attempt to cover the domain by a wildcard, an error occurred. The
+exact error should have been reported already.
+</p></dd><dt><a name="DATASRC_QUERY_WILDCARD_PROVENX_FAIL"></a><span class="term">DATASRC_QUERY_WILDCARD_PROVENX_FAIL unable to prove nonexistence of '%1' (%2)</span></dt><dd><p>
+While processing a wildcard, it wasn't possible to prove nonexistence of the
+given domain or record. The code is 1 for error and 2 for not implemented.
+</p></dd><dt><a name="DATASRC_QUERY_WILDCARD_REFERRAL"></a><span class="term">DATASRC_QUERY_WILDCARD_REFERRAL unable to find referral info for '%1' (%2)</span></dt><dd><p>
+While processing a wildcard, a referral was encountered, but it wasn't possible
+to get enough information for it. The code is 1 for error, 2 for not implemented.
+</p></dd><dt><a name="DATASRC_SQLITE_CLOSE"></a><span class="term">DATASRC_SQLITE_CLOSE closing SQLite database</span></dt><dd><p>
+Debug information. The SQLite data source is closing the database file.
+</p></dd><dt><a name="DATASRC_SQLITE_CREATE"></a><span class="term">DATASRC_SQLITE_CREATE SQLite data source created</span></dt><dd><p>
+Debug information. An instance of SQLite data source is being created.
+</p></dd><dt><a name="DATASRC_SQLITE_DESTROY"></a><span class="term">DATASRC_SQLITE_DESTROY SQLite data source destroyed</span></dt><dd><p>
+Debug information. An instance of SQLite data source is being destroyed.
+</p></dd><dt><a name="DATASRC_SQLITE_ENCLOSURE"></a><span class="term">DATASRC_SQLITE_ENCLOSURE looking for zone containing '%1'</span></dt><dd><p>
+Debug information. The SQLite data source is trying to identify which zone
+should hold this domain.
+</p></dd><dt><a name="DATASRC_SQLITE_ENCLOSURE_NOTFOUND"></a><span class="term">DATASRC_SQLITE_ENCLOSURE_NOTFOUND no zone contains it</span></dt><dd><p>
+Debug information. The last SQLITE_ENCLOSURE query was unsuccessful; there's
+no such zone in our data.
+</p></dd><dt><a name="DATASRC_SQLITE_FIND"></a><span class="term">DATASRC_SQLITE_FIND looking for RRset '%1/%2'</span></dt><dd><p>
+Debug information. The SQLite data source is looking up a resource record
+set.
+</p></dd><dt><a name="DATASRC_SQLITE_FINDADDRS"></a><span class="term">DATASRC_SQLITE_FINDADDRS looking for A/AAAA addresses for '%1'</span></dt><dd><p>
+Debug information. The data source is looking up the addresses for the given
+domain name.
+</p></dd><dt><a name="DATASRC_SQLITE_FINDADDRS_BAD_CLASS"></a><span class="term">DATASRC_SQLITE_FINDADDRS_BAD_CLASS class mismatch looking for addresses ('%1' and '%2')</span></dt><dd><p>
+The SQLite data source was looking up A/AAAA addresses, but the data source
+contains a different class than the query was for.
+</p></dd><dt><a name="DATASRC_SQLITE_FINDEXACT"></a><span class="term">DATASRC_SQLITE_FINDEXACT looking for exact RRset '%1/%2'</span></dt><dd><p>
+Debug information. The SQLite data source is looking up an exact resource
+record.
+</p></dd><dt><a name="DATASRC_SQLITE_FINDEXACT_BAD_CLASS"></a><span class="term">DATASRC_SQLITE_FINDEXACT_BAD_CLASS class mismatch looking for an RRset ('%1' and '%2')</span></dt><dd><p>
+The SQLite data source was looking up an exact RRset, but the data source
+contains a different class than the query was for.
+</p></dd><dt><a name="DATASRC_SQLITE_FINDREC"></a><span class="term">DATASRC_SQLITE_FINDREC looking for record '%1/%2'</span></dt><dd><p>
+Debug information. The SQLite data source is looking up records of the given
+name and type in the database.
+</p></dd><dt><a name="DATASRC_SQLITE_FINDREF"></a><span class="term">DATASRC_SQLITE_FINDREF looking for referral at '%1'</span></dt><dd><p>
+Debug information. The SQLite data source is identifying whether this domain
+is a referral and where it goes.
+</p></dd><dt><a name="DATASRC_SQLITE_FINDREF_BAD_CLASS"></a><span class="term">DATASRC_SQLITE_FINDREF_BAD_CLASS class mismatch looking for referral ('%1' and '%2')</span></dt><dd><p>
+The SQLite data source was trying to identify if there's a referral, but
+it contains a different class than the query was for.
+</p></dd><dt><a name="DATASRC_SQLITE_FIND_BAD_CLASS"></a><span class="term">DATASRC_SQLITE_FIND_BAD_CLASS class mismatch looking for an RRset ('%1' and '%2')</span></dt><dd><p>
+The SQLite data source was looking up an RRset, but the data source contains
+a different class than the query was for.
+</p></dd><dt><a name="DATASRC_SQLITE_FIND_NSEC3"></a><span class="term">DATASRC_SQLITE_FIND_NSEC3 looking for NSEC3 in zone '%1' for hash '%2'</span></dt><dd><p>
+Debug information. We're trying to look up a NSEC3 record in the SQLite data
+source.
+</p></dd><dt><a name="DATASRC_SQLITE_FIND_NSEC3_NO_ZONE"></a><span class="term">DATASRC_SQLITE_FIND_NSEC3_NO_ZONE no such zone '%1'</span></dt><dd><p>
+The SQLite data source was asked to provide an NSEC3 record for the given
+zone, but it doesn't contain that zone.
+</p></dd><dt><a name="DATASRC_SQLITE_OPEN"></a><span class="term">DATASRC_SQLITE_OPEN opening SQLite database '%1'</span></dt><dd><p>
+Debug information. The SQLite data source is loading an SQLite database in
+the provided file.
+</p></dd><dt><a name="DATASRC_SQLITE_PREVIOUS"></a><span class="term">DATASRC_SQLITE_PREVIOUS looking for name previous to '%1'</span></dt><dd><p>
+Debug information. We're trying to look up the name preceding the supplied one.
+</p></dd><dt><a name="DATASRC_SQLITE_PREVIOUS_NO_ZONE"></a><span class="term">DATASRC_SQLITE_PREVIOUS_NO_ZONE no zone containing '%1'</span></dt><dd><p>
+The SQLite data source tried to identify the name preceding this one, but this
+name is not contained in any zone in the data source.
+</p></dd><dt><a name="DATASRC_SQLITE_SETUP"></a><span class="term">DATASRC_SQLITE_SETUP setting up SQLite database</span></dt><dd><p>
+The database for the SQLite data source was found to be empty. It is assumed
+this is the first run and it is being initialized with the current schema. It
+will still contain no data, but it will be ready for use.
+</p></dd><dt><a name="DATASRC_STATIC_BAD_CLASS"></a><span class="term">DATASRC_STATIC_BAD_CLASS static data source can handle CH only</span></dt><dd><p>
+For some reason, someone asked the static data source a query that is not in
+the CH class.
+</p></dd><dt><a name="DATASRC_STATIC_CREATE"></a><span class="term">DATASRC_STATIC_CREATE creating the static datasource</span></dt><dd><p>
+Debug information. The static data source (the one holding data such as
+version.bind) is being created.
+</p></dd><dt><a name="DATASRC_STATIC_FIND"></a><span class="term">DATASRC_STATIC_FIND looking for '%1/%2'</span></dt><dd><p>
+Debug information. This resource record set is being looked up in the static
+data source.
+</p></dd><dt><a name="DATASRC_UNEXPECTED_QUERY_STATE"></a><span class="term">DATASRC_UNEXPECTED_QUERY_STATE unexpected query state</span></dt><dd><p>
+This indicates a programming error. An internal task of unknown type was
+generated.
+</p></dd><dt><a name="LOGIMPL_ABOVEDBGMAX"></a><span class="term">LOGIMPL_ABOVEDBGMAX debug level of %1 is too high and will be set to the maximum of %2</span></dt><dd><p>
+A message from the underlying logger implementation code: the debug level
+(as set by the string DEBUGn) is above the maximum allowed value and has
+been reduced to that value.
+</p></dd><dt><a name="LOGIMPL_BADDEBUG"></a><span class="term">LOGIMPL_BADDEBUG debug string is '%1': must be of the form DEBUGn</span></dt><dd><p>
+The string indicating the extended logging level (used by the underlying
+logger implementation code) is not of the stated form. In particular,
+it starts with DEBUG but does not end with an integer.
+</p></dd><dt><a name="LOGIMPL_BELOWDBGMIN"></a><span class="term">LOGIMPL_BELOWDBGMIN debug level of %1 is too low and will be set to the minimum of %2</span></dt><dd><p>
+A message from the underlying logger implementation code: the debug level
+(as set by the string DEBUGn) is below the minimum allowed value and has
+been increased to that value.
+</p></dd><dt><a name="MSG_BADDESTINATION"></a><span class="term">MSG_BADDESTINATION unrecognized log destination: %1</span></dt><dd><p>
+A logger destination value was given that was not recognized. The
+destination should be one of "console", "file", or "syslog".
+</p></dd><dt><a name="MSG_BADSEVERITY"></a><span class="term">MSG_BADSEVERITY unrecognized log severity: %1</span></dt><dd><p>
+A logger severity value was given that was not recognized. The severity
+should be one of "DEBUG", "INFO", "WARN", "ERROR", or "FATAL".
+</p></dd><dt><a name="MSG_BADSTREAM"></a><span class="term">MSG_BADSTREAM bad log console output stream: %1</span></dt><dd><p>
+A log console output stream was given that was not recognized. The
+output stream should be one of "stdout" or "stderr".
+</p></dd><dt><a name="MSG_DUPLNS"></a><span class="term">MSG_DUPLNS line %1: duplicate $NAMESPACE directive found</span></dt><dd><p>
+When reading a message file, more than one $NAMESPACE directive was found. In
+this version of the code, such a condition is regarded as an error and the
+read will be abandoned.
+</p></dd><dt><a name="MSG_DUPMSGID"></a><span class="term">MSG_DUPMSGID duplicate message ID (%1) in compiled code</span></dt><dd><p>
+Indicative of a programming error, when it started up, BIND10 detected that
+the given message ID had been registered by one or more modules. (All message
+IDs should be unique throughout BIND10.) This has no impact on the operation
+of the server other than that erroneous messages may be logged. (When BIND10
+loads the message IDs (and their associated text), if a duplicate ID is found
+it is discarded. However, when the module that supplied the duplicate ID logs
+that particular message, the text supplied by the module that added the
+original ID will be output - something that may bear no relation to the
+condition being logged.)
+</p></dd><dt><a name="MSG_IDNOTFND"></a><span class="term">MSG_IDNOTFND could not replace message text for '%1': no such message</span></dt><dd><p>
+During start-up a local message file was read. A line with the listed
+message identification was found in the file, but the identification is not
+one contained in the compiled-in message dictionary. Either the message
+identification has been mis-spelled in the file, or the local file was used
+for an earlier version of the software and the message with that
+identification has been removed.
+</p><p>
+This message may appear a number of times in the file, once for every such
+unknown message identification.
+</p></dd><dt><a name="MSG_INVMSGID"></a><span class="term">MSG_INVMSGID line %1: invalid message identification '%2'</span></dt><dd><p>
+The concatenation of the prefix and the message identification is used as
+a symbol in the C++ module; as such it may only contain alphanumeric
+characters or underscores, and may not start with a digit.
+</p></dd><dt><a name="MSG_NOMSGID"></a><span class="term">MSG_NOMSGID line %1: message definition line found without a message ID</span></dt><dd><p>
+Message definition lines are lines starting with a "%". The rest of the line
+should comprise the message ID and text describing the message. This error
+indicates the message compiler found a line in the message file comprising
+just the "%" and nothing else.
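+</p><p>
+Purely as an illustration of the format described in these entries (the
+namespace, message ID and text below are invented), a message file might
+contain:
+</p><pre class="screen">
+$NAMESPACE isc::example
+% EXAMPLE_STARTED the example module started with %1 worker(s)
+</pre><p>
+A definition line must carry both the message ID and its text: a line with
+just the "%" triggers MSG_NOMSGID, and an ID without any text triggers
+MSG_NOMSGTXT (described next).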
+</p></dd><dt><a name="MSG_NOMSGTXT"></a><span class="term">MSG_NOMSGTXT line %1: line found containing a message ID ('%2') and no text</span></dt><dd><p>
+Message definition lines are lines starting with a "%". The rest of the line
+should comprise the message ID and text describing the message. This error
+is generated when a line is found in the message file that contains the
+leading "%" and the message identification but no text.
+</p></dd><dt><a name="MSG_NSEXTRARG"></a><span class="term">MSG_NSEXTRARG line %1: $NAMESPACE directive has too many arguments</span></dt><dd><p>
+The $NAMESPACE directive takes a single argument, a namespace in which all the
+generated symbol names are placed. This error is generated when the
+compiler finds a $NAMESPACE directive with more than one argument.
+</p></dd><dt><a name="MSG_NSINVARG"></a><span class="term">MSG_NSINVARG line %1: $NAMESPACE directive has an invalid argument ('%2')</span></dt><dd><p>
+The $NAMESPACE argument should be a valid C++ namespace. The reader does a
+cursory check on its validity, checking that the characters in the namespace
+are correct. The error is generated when the reader finds an invalid
+character. (Valid characters are alphanumeric characters, underscores and
+colons.)
+</p></dd><dt><a name="MSG_NSNOARG"></a><span class="term">MSG_NSNOARG line %1: no arguments were given to the $NAMESPACE directive</span></dt><dd><p>
+The $NAMESPACE directive takes a single argument, a namespace in which all the
+generated symbol names are placed. This error is generated when the
+compiler finds a $NAMESPACE directive with no arguments.
+</p></dd><dt><a name="MSG_OPENIN"></a><span class="term">MSG_OPENIN unable to open message file %1 for input: %2</span></dt><dd><p>
+The program was not able to open the specified input message file for the
+reason given.
+</p></dd><dt><a name="MSG_OPENOUT"></a><span class="term">MSG_OPENOUT unable to open %1 for output: %2</span></dt><dd><p>
+The program was not able to open the specified output file for the reason
+given.
+</p></dd><dt><a name="MSG_PRFEXTRARG"></a><span class="term">MSG_PRFEXTRARG line %1: $PREFIX directive has too many arguments</span></dt><dd><p>
+The $PREFIX directive takes a single argument, a prefix to be added to the
+symbol names when a C++ .h file is created. This error is generated when the
+compiler finds a $PREFIX directive with more than one argument.
+</p></dd><dt><a name="MSG_PRFINVARG"></a><span class="term">MSG_PRFINVARG line %1: $PREFIX directive has an invalid argument ('%2')</span></dt><dd><p>
+The $PREFIX argument is used in a symbol name in a C++ header file. As such,
+it must adhere to restrictions on C++ symbol names (e.g. it may only contain
+alphanumeric characters or underscores, and may not start with a digit).
+A $PREFIX directive was found with an argument (given in the message) that
+violates those restrictions.
+</p></dd><dt><a name="MSG_RDLOCMES"></a><span class="term">MSG_RDLOCMES reading local message file %1</span></dt><dd><p>
+This is an informational message output by BIND10 when it starts to read a
+local message file. (A local message file may replace the text of one or more
+messages; the ID of the message will not be changed though.)
+</p></dd><dt><a name="MSG_READERR"></a><span class="term">MSG_READERR error reading from message file %1: %2</span></dt><dd><p>
+The specified error was encountered reading from the named message file.
+</p></dd><dt><a name="MSG_UNRECDIR"></a><span class="term">MSG_UNRECDIR line %1: unrecognised directive '%2'</span></dt><dd><p>
+A line starting with a dollar symbol was found, but the first word on the line
+(shown in the message) was not a recognised message compiler directive.
+</p></dd><dt><a name="MSG_WRITERR"></a><span class="term">MSG_WRITERR error writing to %1: %2</span></dt><dd><p>
+The specified error was encountered by the message compiler when writing to
+the named output file.
+</p></dd><dt><a name="NSAS_INVRESPSTR"></a><span class="term">NSAS_INVRESPSTR queried for %1 but got invalid response</span></dt><dd><p>
+This message indicates an internal error in the nameserver address store
+component (NSAS) of the resolver. The NSAS made a query for an RR for the
+specified nameserver but received an invalid response. Either the success
+function was called without a DNS message or the message was invalid in some
+way. (In the latter case, the error should have been picked up elsewhere in
+the processing logic, hence the raising of the error here.)
+</p></dd><dt><a name="NSAS_INVRESPTC"></a><span class="term">NSAS_INVRESPTC queried for %1 RR of type/class %2/%3, received response %4/%5</span></dt><dd><p>
+This message indicates an internal error in the nameserver address store
+component (NSAS) of the resolver. The NSAS made a query for the given RR
+type and class, but instead received an answer with the given type and class.
+</p></dd><dt><a name="NSAS_LOOKUPCANCEL"></a><span class="term">NSAS_LOOKUPCANCEL lookup for zone %1 has been cancelled</span></dt><dd><p>
+A debug message, this is output when a NSAS (nameserver address store -
+part of the resolver) lookup for a zone has been cancelled.
+</p></dd><dt><a name="NSAS_LOOKUPZONE"></a><span class="term">NSAS_LOOKUPZONE searching NSAS for nameservers for zone %1</span></dt><dd><p>
+A debug message, this is output when a call is made to the nameserver address
+store (part of the resolver) to obtain the nameservers for the specified zone.
+</p></dd><dt><a name="NSAS_NSADDR"></a><span class="term">NSAS_NSADDR asking resolver to obtain A and AAAA records for %1</span></dt><dd><p>
+A debug message, the NSAS (nameserver address store - part of the resolver) is
+making a callback into the resolver to retrieve the address records for the
+specified nameserver.
+</p></dd><dt><a name="NSAS_NSLKUPFAIL"></a><span class="term">NSAS_NSLKUPFAIL failed to lookup any %1 for %2</span></dt><dd><p>
+A debug message, the NSAS (nameserver address store - part of the resolver)
+has been unable to retrieve the specified resource record for the specified
+nameserver. This is not necessarily a problem - the nameserver may be
+unreachable, in which case the NSAS will try other nameservers in the zone.
+</p></dd><dt><a name="NSAS_NSLKUPSUCC"></a><span class="term">NSAS_NSLKUPSUCC found address %1 for %2</span></dt><dd><p>
+A debug message, the NSAS (nameserver address store - part of the resolver)
+has retrieved the given address for the specified nameserver through an
+external query.
+</p></dd><dt><a name="NSAS_SETRTT"></a><span class="term">NSAS_SETRTT reporting RTT for %1 as %2; new value is now %3</span></dt><dd><p>
+A NSAS (nameserver address store - part of the resolver) debug message
+reporting the round-trip time (RTT) for a query made to the specified
+nameserver. The RTT has been updated using the value given and the new RTT is
+displayed. (The RTT is subject to a calculation that damps out sudden
+changes. As a result, the new RTT is not necessarily equal to the RTT
+reported.)
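+</p><p>
+The exact calculation is not described here; purely as a hypothetical
+illustration, one common damping approach is an exponentially weighted moving
+average along these lines:
+</p><pre class="screen">
+#include <stdint.h>
+
+// Illustrative only: weight the stored RTT more heavily than the new sample
+// so that a single unusually slow (or fast) response does not swing the
+// stored value too far.
+uint32_t dampedRtt(uint32_t stored_rtt_ms, uint32_t measured_rtt_ms) {
+    return (7 * stored_rtt_ms + measured_rtt_ms) / 8;
+}
+</pre><p>
+With a scheme of this kind the updated RTT differs from the raw measurement,
+which is why the message reports both values.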
+</p></dd><dt><a name="RESLIB_ANSWER"></a><span class="term">RESLIB_ANSWER answer received in response to query for <%1></span></dt><dd><p>
+A debug message recording that an answer has been received to an upstream
+query for the specified question. Previous debug messages will have indicated
+the server to which the question was sent.
+</p></dd><dt><a name="RESLIB_CNAME"></a><span class="term">RESLIB_CNAME CNAME received in response to query for <%1></span></dt><dd><p>
+A debug message recording that a CNAME response has been received to an
+upstream query for the specified question. Previous debug messages will have
+indicated the server to which the question was sent.
+</p></dd><dt><a name="RESLIB_DEEPEST"></a><span class="term">RESLIB_DEEPEST did not find <%1> in cache, deepest delegation found is %2</span></dt><dd><p>
+A debug message, a cache lookup did not find the specified <name, class,
+type> tuple in the cache; instead, the deepest delegation found is indicated.
+</p></dd><dt><a name="RESLIB_FOLLOWCNAME"></a><span class="term">RESLIB_FOLLOWCNAME following CNAME chain to <%1></span></dt><dd><p>
+A debug message, a CNAME response was received and another query is being issued
+for the <name, class, type> tuple.
+</p></dd><dt><a name="RESLIB_LONGCHAIN"></a><span class="term">RESLIB_LONGCHAIN CNAME received in response to query for <%1>: CNAME chain length exceeded</span></dt><dd><p>
+A debug message recording that a CNAME response has been received to an upstream
+query for the specified question (Previous debug messages will have indicated
+the server to which the question was sent). However, receipt of this CNAME
+has meant that the resolver has exceeded the CNAME chain limit (a CNAME chain
+is where one CNAME points to another) and so an error is being returned.
+</p></dd><dt><a name="RESLIB_NONSRRSET"></a><span class="term">RESLIB_NONSRRSET no NS RRSet in referral response received to query for <%1></span></dt><dd><p>
+A debug message, this indicates that a response was received for the specified
+query and was categorised as a referral. However, the received message did
+not contain any NS RRsets. This may indicate a programming error in the
+response classification code.
+</p></dd><dt><a name="RESLIB_NSASLOOK"></a><span class="term">RESLIB_NSASLOOK looking up nameserver for zone %1 in the NSAS</span></dt><dd><p>
+A debug message, the RunningQuery object is querying the NSAS for the
+nameservers for the specified zone.
+</p></dd><dt><a name="RESLIB_NXDOMRR"></a><span class="term">RESLIB_NXDOMRR NXDOMAIN/NXRRSET received in response to query for <%1></span></dt><dd><p>
+A debug message recording that either an NXDOMAIN or an NXRRSET response has
+been received to an upstream query for the specified question. Previous debug
+messages will have indicated the server to which the question was sent.
+</p></dd><dt><a name="RESLIB_PROTOCOL"></a><span class="term">RESLIB_PROTOCOL protocol error in answer for %1: %3</span></dt><dd><p>
+A debug message indicating that a protocol error was received. As there
+are no retries left, an error will be reported.
+</p></dd><dt><a name="RESLIB_PROTOCOLRTRY"></a><span class="term">RESLIB_PROTOCOLRTRY protocol error in answer for %1: %2 (retries left: %3)</span></dt><dd><p>
+A debug message indicating that a protocol error was received and that
+the resolver is repeating the query to the same nameserver. After this
+repeated query, there will be the indicated number of retries left.
+</p></dd><dt><a name="RESLIB_RCODERR"></a><span class="term">RESLIB_RCODERR RCODE indicates error in response to query for <%1></span></dt><dd><p>
+A debug message, the response to the specified query indicated an error
+that is not covered by a specific code path. A SERVFAIL will be returned.
+</p></dd><dt><a name="RESLIB_REFERRAL"></a><span class="term">RESLIB_REFERRAL referral received in response to query for <%1></span></dt><dd><p>
+A debug message recording that a referral response has been received to an
+upstream query for the specified question. Previous debug messages will
+have indicated the server to which the question was sent.
+</p></dd><dt><a name="RESLIB_REFERZONE"></a><span class="term">RESLIB_REFERZONE referred to zone %1</span></dt><dd><p>
+A debug message indicating that the last referral message was to the specified
+zone.
+</p></dd><dt><a name="RESLIB_RESCAFND"></a><span class="term">RESLIB_RESCAFND found <%1> in the cache (resolve() instance %2)</span></dt><dd><p>
+This is a debug message and indicates that a RecursiveQuery object found
+the specified <name, class, type> tuple in the cache. The instance number
+at the end of the message indicates which of the two resolve() methods has
+been called.
+</p></dd><dt><a name="RESLIB_RESCANOTFND"></a><span class="term">RESLIB_RESCANOTFND did not find <%1> in the cache, starting RunningQuery (resolve() instance %2)</span></dt><dd><p>
+This is a debug message and indicates that the lookup in the cache made by the
+RecursiveQuery::resolve() method did not find an answer, so a new RunningQuery
+object has been created to resolve the question. The instance number at
+the end of the message indicates which of the two resolve() methods has
+been called.
+</p></dd><dt><a name="RESLIB_RESOLVE"></a><span class="term">RESLIB_RESOLVE asked to resolve <%1> (resolve() instance %2)</span></dt><dd><p>
+A debug message, the RecursiveQuery::resolve method has been called to resolve
+the specified <name, class, type> tuple. The first action will be to look up
+the specified tuple in the cache. The instance number at the end of the
+message indicates which of the two resolve() methods has been called.
+</p></dd><dt><a name="RESLIB_RRSETFND"></a><span class="term">RESLIB_RRSETFND found single RRset in the cache when querying for <%1> (resolve() instance %2)</span></dt><dd><p>
+A debug message, indicating that when RecursiveQuery::resolve queried the
+cache, a single RRset was found which was put in the answer. The instance
+number at the end of the message indicates which of the two resolve()
+methods has been called.
+</p></dd><dt><a name="RESLIB_RTT"></a><span class="term">RESLIB_RTT round-trip time of last query calculated as %1 ms</span></dt><dd><p>
+A debug message giving the round-trip time of the last query and response.
+</p></dd><dt><a name="RESLIB_RUNCAFND"></a><span class="term">RESLIB_RUNCAFND found <%1> in the cache</span></dt><dd><p>
+This is a debug message and indicates that a RunningQuery object found
+the specified <name, class, type> tuple in the cache.
+</p></dd><dt><a name="RESLIB_RUNCALOOK"></a><span class="term">RESLIB_RUNCALOOK looking up <%1> in the cache</span></dt><dd><p>
+This is a debug message and indicates that a RunningQuery object has made
+a call to its doLookup() method to look up the specified <name, class, type>
+tuple, the first action of which will be to examine the cache.
+</p></dd><dt><a name="RESLIB_RUNQUFAIL"></a><span class="term">RESLIB_RUNQUFAIL failure callback - nameservers are unreachable</span></dt><dd><p>
+A debug message indicating that a RunningQuery's failure callback has been
+called because all nameservers for the zone in question are unreachable.
+</p></dd><dt><a name="RESLIB_RUNQUSUCC"></a><span class="term">RESLIB_RUNQUSUCC success callback - sending query to %1</span></dt><dd><p>
+A debug message indicating that a RunningQuery's success callback has been
+called because a nameserver has been found, and that a query is being sent
+to the specified nameserver.
+</p></dd><dt><a name="RESLIB_TESTSERV"></a><span class="term">RESLIB_TESTSERV setting test server to %1(%2)</span></dt><dd><p>
+This is an internal debugging message and is only generated in unit tests.
+It indicates that all upstream queries from the resolver are being routed to
+the specified server, regardless of the address of the nameserver to which
+the query would normally be routed. As it should never be seen in normal
+operation, it is a warning message instead of a debug message.
+</p></dd><dt><a name="RESLIB_TESTUPSTR"></a><span class="term">RESLIB_TESTUPSTR sending upstream query for <%1> to test server at %2</span></dt><dd><p>
+This is a debug message and should only be seen in unit tests. A query for
+the specified <name, class, type> tuple is being sent to a test nameserver
+whose address is given in the message.
+</p></dd><dt><a name="RESLIB_TIMEOUT"></a><span class="term">RESLIB_TIMEOUT query <%1> to %2 timed out</span></dt><dd><p>
+A debug message indicating that the specified query has timed out and as
+there are no retries left, an error will be reported.
+</p></dd><dt><a name="RESLIB_TIMEOUTRTRY"></a><span class="term">RESLIB_TIMEOUTRTRY query <%1> to %2 timed out, re-trying (retries left: %3)</span></dt><dd><p>
+A debug message indicating that the specified query has timed out and that
+the resolver is repeating the query to the same nameserver. After this
+repeated query, there will be the indicated number of retries left.
+</p></dd><dt><a name="RESLIB_TRUNCATED"></a><span class="term">RESLIB_TRUNCATED response to query for <%1> was truncated, re-querying over TCP</span></dt><dd><p>
+A debug message, this indicates that the response to the specified query was
+truncated and that the resolver will be re-querying over TCP. There are
+various reasons why responses may be truncated, so this message is normal and
+gives no cause for concern.
+</p></dd><dt><a name="RESLIB_UPSTREAM"></a><span class="term">RESLIB_UPSTREAM sending upstream query for <%1> to %2</span></dt><dd><p>
+A debug message indicating that a query for the specified <name, class, type>
+tuple is being sent to a nameserver whose address is given in the message.
+</p></dd><dt><a name="RESOLVER_AXFRTCP"></a><span class="term">RESOLVER_AXFRTCP AXFR request received over TCP</span></dt><dd><p>
+A debug message, the resolver received an AXFR request over TCP. The server
+cannot process it and will return an error message to the sender with the
+RCODE set to NOTIMP.
+</p></dd><dt><a name="RESOLVER_AXFRUDP"></a><span class="term">RESOLVER_AXFRUDP AXFR request received over UDP</span></dt><dd><p>
+A debug message, the resolver received an AXFR request over UDP. The server
+cannot process it (and in any case, an AXFR request should be sent over TCP)
+and will return an error message to the sender with the RCODE set to FORMERR.
+</p></dd><dt><a name="RESOLVER_CLTMOSMALL"></a><span class="term">RESOLVER_CLTMOSMALL client timeout of %1 is too small</span></dt><dd><p>
+An error indicating that the configuration value specified for the client
+timeout is too small.
+</p></dd><dt><a name="RESOLVER_CONFIGCHAN"></a><span class="term">RESOLVER_CONFIGCHAN configuration channel created</span></dt><dd><p>
+A debug message, output when the resolver has successfully established a
+connection to the configuration channel.
+</p></dd><dt><a name="RESOLVER_CONFIGERR"></a><span class="term">RESOLVER_CONFIGERR error in configuration: %1</span></dt><dd><p>
+An error was detected in a configuration update received by the resolver. This
+may be in the format of the configuration message (in which case this is a
+programming error) or it may be in the data supplied (in which case it is
+a user error). The reason for the error, given as a parameter in the message,
+will give more details.
+</p></dd><dt><a name="RESOLVER_CONFIGLOAD"></a><span class="term">RESOLVER_CONFIGLOAD configuration loaded</span></dt><dd><p>
+A debug message, output when the resolver configuration has been successfully
+loaded.
+</p></dd><dt><a name="RESOLVER_CONFIGUPD"></a><span class="term">RESOLVER_CONFIGUPD configuration updated: %1</span></dt><dd><p>
+A debug message, the configuration has been updated with the specified
+information.
+</p></dd><dt><a name="RESOLVER_CREATED"></a><span class="term">RESOLVER_CREATED main resolver object created</span></dt><dd><p>
+A debug message, output when the Resolver() object has been created.
+</p></dd><dt><a name="RESOLVER_DNSMSGRCVD"></a><span class="term">RESOLVER_DNSMSGRCVD DNS message received: %1</span></dt><dd><p>
+A debug message, this always precedes some other logging message and is the
+formatted contents of the DNS packet that the other message refers to.
+</p></dd><dt><a name="RESOLVER_DNSMSGSENT"></a><span class="term">RESOLVER_DNSMSGSENT DNS message of %1 bytes sent: %2</span></dt><dd><p>
+A debug message, this contains details of the response sent back to the querying
+system.
+</p></dd><dt><a name="RESOLVER_FAILED"></a><span class="term">RESOLVER_FAILED resolver failed, reason: %1</span></dt><dd><p>
+This is an error message output when an unhandled exception is caught by the
+resolver. All it can do is to shut down.
+</p></dd><dt><a name="RESOLVER_FWDADDR"></a><span class="term">RESOLVER_FWDADDR setting forward address %1(%2)</span></dt><dd><p>
+This message may appear multiple times during startup, and it lists the
+forward addresses used by the resolver when running in forwarding mode.
+</p></dd><dt><a name="RESOLVER_FWDQUERY"></a><span class="term">RESOLVER_FWDQUERY processing forward query</span></dt><dd><p>
+The received query has passed all checks and is being forwarded to upstream
+servers.
+</p></dd><dt><a name="RESOLVER_HDRERR"></a><span class="term">RESOLVER_HDRERR message received, exception when processing header: %1</span></dt><dd><p>
+A debug message noting that an exception occurred during the processing of
+a received packet. The packet has been dropped.
+</p></dd><dt><a name="RESOLVER_IXFR"></a><span class="term">RESOLVER_IXFR IXFR request received</span></dt><dd><p>
+The resolver received an IXFR request. The server cannot process it
+and will return an error message to the sender with the RCODE set to NOTIMP.
+</p></dd><dt><a name="RESOLVER_LKTMOSMALL"></a><span class="term">RESOLVER_LKTMOSMALL lookup timeout of %1 is too small</span></dt><dd><p>
+An error indicating that the configuration value specified for the lookup
+timeout is too small.
+</p></dd><dt><a name="RESOLVER_NFYNOTAUTH"></a><span class="term">RESOLVER_NFYNOTAUTH NOTIFY arrived but server is not authoritative</span></dt><dd><p>
+The resolver received a NOTIFY message. As the server is not authoritative it
+cannot process it, so it returns an error message to the sender with the RCODE
+set to NOTAUTH.
+</p></dd><dt><a name="RESOLVER_NORMQUERY"></a><span class="term">RESOLVER_NORMQUERY processing normal query</span></dt><dd><p>
+The received query has passed all checks and is being processed by the resolver.
+</p></dd><dt><a name="RESOLVER_NOROOTADDR"></a><span class="term">RESOLVER_NOROOTADDR no root addresses available</span></dt><dd><p>
+A warning message output during startup, indicating that no root addresses have
+been set. This may be because the resolver will get them from a priming query.
+</p></dd><dt><a name="RESOLVER_NOTIN"></a><span class="term">RESOLVER_NOTIN non-IN class request received, returning REFUSED message</span></dt><dd><p>
+A debug message, the resolver has received a DNS packet that was not IN class.
+The resolver cannot handle such packets, so is returning a REFUSED response to
+the sender.
+</p></dd><dt><a name="RESOLVER_NOTONEQUES"></a><span class="term">RESOLVER_NOTONEQUES query contained %1 questions, exactly one question was expected</span></dt><dd><p>
+A debug message, the resolver received a query whose question section contained
+the number of entries given in the message. This is a malformed message, as a
+DNS query must contain exactly one question. The resolver will
+return a message to the sender with the RCODE set to FORMERR.
+</p></dd><dt><a name="RESOLVER_OPCODEUNS"></a><span class="term">RESOLVER_OPCODEUNS opcode %1 not supported by the resolver</span></dt><dd><p>
+A debug message, the resolver received a message with an unsupported opcode
+(it can only process QUERY opcodes). It will return a message to the sender
+with the RCODE set to NOTIMP.
+</p></dd><dt><a name="RESOLVER_PARSEERR"></a><span class="term">RESOLVER_PARSEERR error parsing received message: %1 - returning %2</span></dt><dd><p>
+A debug message noting that the resolver received a message and the parsing
+of the body of the message failed due to some non-protocol related reason
+(although the parsing of the header succeeded). The message parameters give
+a textual description of the problem and the RCODE returned.
+</p></dd><dt><a name="RESOLVER_PRINTMSG"></a><span class="term">RESOLVER_PRINTMSG print message command, aeguments are: %1</span></dt><dd><p>
+This message is logged when a "print_message" command is received over the
+command channel.
+</p></dd><dt><a name="RESOLVER_PROTERR"></a><span class="term">RESOLVER_PROTERR protocol error parsing received message: %1 - returning %2</span></dt><dd><p>
+A debug message noting that the resolver received a message and the parsing
+of the body of the message failed due to some protocol error (although the
+parsing of the header succeeded). The message parameters give a textual
+description of the problem and the RCODE returned.
+</p></dd><dt><a name="RESOLVER_QUSETUP"></a><span class="term">RESOLVER_QUSETUP query setup</span></dt><dd><p>
+A debug message noting that the resolver is creating a RecursiveQuery object.
+</p></dd><dt><a name="RESOLVER_QUSHUT"></a><span class="term">RESOLVER_QUSHUT query shutdown</span></dt><dd><p>
+A debug message noting that the resolver is destroying a RecursiveQuery object.
+</p></dd><dt><a name="RESOLVER_QUTMOSMALL"></a><span class="term">RESOLVER_QUTMOSMALL query timeout of %1 is too small</span></dt><dd><p>
+An error indicating that the configuration value specified for the query
+timeout is too small.
+</p></dd><dt><a name="RESOLVER_RECURSIVE"></a><span class="term">RESOLVER_RECURSIVE running in recursive mode</span></dt><dd><p>
+This is an informational message that appears at startup noting that the
+resolver is running in recursive mode.
+</p></dd><dt><a name="RESOLVER_RECVMSG"></a><span class="term">RESOLVER_RECVMSG resolver has received a DNS message</span></dt><dd><p>
+A debug message indicating that the resolver has received a message. Depending
+on the debug settings, subsequent log output will indicate the nature of the
+message.
+</p></dd><dt><a name="RESOLVER_RETRYNEG"></a><span class="term">RESOLVER_RETRYNEG negative number of retries (%1) specified in the configuration</span></dt><dd><p>
+An error message indicating that the resolver configuration has specified a
+negative retry count. Only zero or positive values are valid.
+</p></dd><dt><a name="RESOLVER_ROOTADDR"></a><span class="term">RESOLVER_ROOTADDR setting root address %1(%2)</span></dt><dd><p>
+This message may appear multiple times during startup; it lists the root
+addresses used by the resolver.
+</p></dd><dt><a name="RESOLVER_SERVICE"></a><span class="term">RESOLVER_SERVICE service object created</span></dt><dd><p>
+A debug message, output when the main service object (which handles the
+received queries) is created.
+</p></dd><dt><a name="RESOLVER_SETPARAM"></a><span class="term">RESOLVER_SETPARAM query timeout: %1, client timeout: %2, lookup timeout: %3, retry count: %4</span></dt><dd><p>
+A debug message listing the parameters associated with the message. These are:
+query timeout: the timeout (in ms) used for queries originated by the resolver
+to upstream servers. Client timeout: the interval allowed for resolving a
+client query; after this time, the resolver sends back a SERVFAIL to the client
+whilst continuing to resolve the query. Lookup timeout: the time at which the
+resolver gives up trying to resolve a query. Retry count: the number of times
+the resolver will retry a query to an upstream server if it gets a timeout.
+</p><p>
+The client and lookup timeouts require a bit more explanation. The
+resolution of the client query might require a large number of queries to
+upstream nameservers. Even if none of these queries times out, the total time
+taken to perform all the queries may exceed the client timeout. When this
+happens, a SERVFAIL is returned to the client, but the resolver continues
+with the resolution process; data received is still added to the cache. However,
+there comes a time (the lookup timeout) when even the resolver gives up.
+At this point it will wait for pending upstream queries to complete or
+time out, and will then drop the query.
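+</p><p>
+As a rough illustration of the behaviour described above, the following
+sketch (not BIND 10 code; the names and values are invented for this
+example) shows how the client and lookup timeouts might interact:
+</p><pre>
+# Hypothetical sketch only: it illustrates the client/lookup timeout
+# behaviour described above, not the actual BIND 10 implementation.
+import time
+
+CLIENT_TIMEOUT = 4.0    # seconds before SERVFAIL is sent to the client
+LOOKUP_TIMEOUT = 30.0   # seconds before resolution is abandoned
+
+def resolve(query, upstream_steps):
+    start = time.monotonic()
+    client_answered = False
+    for step in upstream_steps:       # each step sends one upstream query
+        elapsed = time.monotonic() - start
+        if elapsed >= LOOKUP_TIMEOUT:
+            return None               # lookup timeout: give up entirely
+        if not client_answered and elapsed >= CLIENT_TIMEOUT:
+            print("SERVFAIL returned to client for", query)
+            client_answered = True    # resolution continues for the cache
+        step()                        # send the upstream query
+    return "answer for " + query      # cached even after a SERVFAIL
+</pre><p>
+In short, the client timeout bounds how long the client waits for an answer,
+while the lookup timeout bounds how long the resolver keeps working on the
+query in the background.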
+</p></dd><dt><a name="RESOLVER_SHUTDOWN"></a><span class="term">RESOLVER_SHUTDOWN resolver shutdown complete</span></dt><dd><p>
+This informational message is output when the resolver has shut down.
+</p></dd><dt><a name="RESOLVER_STARTED"></a><span class="term">RESOLVER_STARTED resolver started</span></dt><dd><p>
+This informational message is output by the resolver when all initialization
+has been completed and it is entering its main loop.
+</p></dd><dt><a name="RESOLVER_STARTING"></a><span class="term">RESOLVER_STARTING starting resolver with command line '%1'</span></dt><dd><p>
+An informational message, this is output when the resolver starts up.
+</p></dd><dt><a name="RESOLVER_UNEXRESP"></a><span class="term">RESOLVER_UNEXRESP received unexpected response, ignoring</span></dt><dd><p>
+A debug message noting that the server has received a response instead of a
+query and is ignoring it.
+</p></dd></dl></div><p>
+ </p></div></div></body></html>
diff --git a/doc/guide/bind10-messages.xml b/doc/guide/bind10-messages.xml
new file mode 100644
index 0000000..eaa8bb9
--- /dev/null
+++ b/doc/guide/bind10-messages.xml
@@ -0,0 +1,2018 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.2//EN"
+"http://www.oasis-open.org/docbook/xml/4.2/docbookx.dtd" [
+<!ENTITY mdash "—" >
+<!ENTITY % version SYSTEM "version.ent">
+%version;
+]>
+<book>
+ <?xml-stylesheet href="bind10-guide.css" type="text/css"?>
+
+ <bookinfo>
+ <title>BIND 10 Messages Manual</title>
+
+ <copyright>
+ <year>2011</year><holder>Internet Systems Consortium, Inc.</holder>
+ </copyright>
+
+ <abstract>
+ <para>BIND 10 is a Domain Name System (DNS) suite managed by
+ Internet Systems Consortium (ISC). It includes DNS libraries
+ and modular components for controlling authoritative and
+ recursive DNS servers.
+ </para>
+ <para>
+ This is the messages manual for BIND 10 version &__VERSION__;.
+ The most up-to-date version of this document, along with
+ other documents for BIND 10, can be found at
+ <ulink url="http://bind10.isc.org/docs"/>.
+ </para>
+ </abstract>
+
+ <releaseinfo>This is the messages manual for BIND 10 version
+ &__VERSION__;.</releaseinfo>
+ </bookinfo>
+
+ <chapter id="intro">
+ <title>Introduction</title>
+ <para>
+ This document lists each message that can be logged by the
+ programs in the BIND 10 package. Each entry in this manual
+ is of the form:
+ <screen>IDENTIFICATION message-text</screen>
+ ... where "IDENTIFICATION" is the message identification included
+ in each message logged and "message-text" is the accompanying
+ message text. The "message-text" may include placeholders of the
+ form "%1", "%2" etc.; these parameters are replaced by relevant
+ values when the message is logged.
+ </para>
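+    <para>
+      For example, when the RESLIB_RTT message (described later in this
+      manual) is logged, its placeholder is replaced by the measured value,
+      so the entry
+      <screen>RESLIB_RTT round-trip time of last query calculated as %1 ms</screen>
+      might appear in the log as
+      <screen>RESLIB_RTT round-trip time of last query calculated as 45 ms</screen>
+      (the value of 45 ms being, of course, purely illustrative).
+    </para>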
+ <para>
+ Each entry is also accompanied by a description giving more
+ information about the circumstances that result in the message
+ being logged.
+ </para>
+ <para>
+ For information on configuring and using BIND 10 logging,
+ refer to the <ulink url="bind10-guide.html">BIND 10 Guide</ulink>.
+ </para>
+ </chapter>
+
+ <chapter id="messages">
+ <title>BIND 10 Messages</title>
+ <para>
+ <variablelist>
+
+<varlistentry id="ASIODNS_FETCHCOMP">
+<term>ASIODNS_FETCHCOMP upstream fetch to %1(%2) has now completed</term>
+<listitem><para>
+A debug message, this records that the upstream fetch (a query made by the
+resolver on behalf of its client) to the specified address has completed.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ASIODNS_FETCHSTOP">
+<term>ASIODNS_FETCHSTOP upstream fetch to %1(%2) has been stopped</term>
+<listitem><para>
+An external component has requested the halting of an upstream fetch. This
+is an allowed operation, and the message should only appear if debug is
+enabled.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ASIODNS_OPENSOCK">
+<term>ASIODNS_OPENSOCK error %1 opening %2 socket to %3(%4)</term>
+<listitem><para>
+The asynchronous I/O code encountered an error when trying to open a socket
+of the specified protocol in order to send a message to the target address.
+The number of the system error that caused the problem is given in the
+message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ASIODNS_RECVSOCK">
+<term>ASIODNS_RECVSOCK error %1 reading %2 data from %3(%4)</term>
+<listitem><para>
+The asynchronous I/O code encountered an error when trying to read data from
+the specified address on the given protocol. The number of the system
+error that caused the problem is given in the message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ASIODNS_RECVTMO">
+<term>ASIODNS_RECVTMO receive timeout while waiting for data from %1(%2)</term>
+<listitem><para>
+An upstream fetch from the specified address timed out. This may happen for
+any number of reasons and is most probably a problem at the remote server
+or a problem on the network. The message will only appear if debug is
+enabled.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ASIODNS_SENDSOCK">
+<term>ASIODNS_SENDSOCK error %1 sending data using %2 to %3(%4)</term>
+<listitem><para>
+The asynchronous I/O code encountered an error when trying to send data to
+the specified address on the given protocol. The number of the system
+error that caused the problem is given in the message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ASIODNS_UNKORIGIN">
+<term>ASIODNS_UNKORIGIN unknown origin for ASIO error code %1 (protocol: %2, address %3)</term>
+<listitem><para>
+This message should not appear; if it does, it indicates an internal error.
+Please submit a bug report.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ASIODNS_UNKRESULT">
+<term>ASIODNS_UNKRESULT unknown result (%1) when IOFetch::stop() was executed for I/O to %2(%3)</term>
+<listitem><para>
+The termination method of the resolver's upstream fetch class was called with
+an unknown result code (which is given in the message). This message should
+not appear and may indicate an internal error. Please enter a bug report.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CONFIG_CCSESSION_MSG">
+<term>CONFIG_CCSESSION_MSG error in CC session message: %1</term>
+<listitem><para>
+There was a problem with an incoming message on the command and control
+channel. The message does not appear to be a valid command, and is
+missing a required element or contains an unknown data format. This
+most likely means that another BIND10 module is sending a bad message.
+The message itself is ignored by this module.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CONFIG_CCSESSION_MSG_INTERNAL">
+<term>CONFIG_CCSESSION_MSG_INTERNAL error handling CC session message: %1</term>
+<listitem><para>
+There was an internal problem handling an incoming message on the
+command and control channel. An unexpected exception was thrown. This
+most likely points to an internal inconsistency in the module code. The
+exception message is appended to the log error, and the module will
+continue to run, but will not send back an answer.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CONFIG_FOPEN_ERR">
+<term>CONFIG_FOPEN_ERR error opening %1: %2</term>
+<listitem><para>
+There was an error opening the given file.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CONFIG_JSON_PARSE">
+<term>CONFIG_JSON_PARSE JSON parse error in %1: %2</term>
+<listitem><para>
+There was a parse error in the JSON file. The given file does not appear
+to be in valid JSON format. Please verify that the filename is correct
+and that the contents are valid JSON.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CONFIG_MANAGER_CONFIG">
+<term>CONFIG_MANAGER_CONFIG error getting configuration from cfgmgr: %1</term>
+<listitem><para>
+The configuration manager returned an error when this module requested
+the configuration. The full error message answer from the configuration
+manager is appended to the log error. The most likely cause is that
+the module is of a different (command specification) version than the
+running configuration manager.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CONFIG_MANAGER_MOD_SPEC">
+<term>CONFIG_MANAGER_MOD_SPEC module specification not accepted by cfgmgr: %1</term>
+<listitem><para>
+The module specification file for this module was rejected by the
+configuration manager. The full error message answer from the
+configuration manager is appended to the log error. The most likely
+cause is that the module is of a different (specification file) version
+than the running configuration manager.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CONFIG_MODULE_SPEC">
+<term>CONFIG_MODULE_SPEC module specification error in %1: %2</term>
+<listitem><para>
+The given file does not appear to be a valid specification file. Please
+verify that the filename is correct and that its contents are a valid
+BIND10 module specification.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_CACHE_CREATE">
+<term>DATASRC_CACHE_CREATE creating the hotspot cache</term>
+<listitem><para>
+Debug information that the hotspot cache was created at startup.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_CACHE_DESTROY">
+<term>DATASRC_CACHE_DESTROY destroying the hotspot cache</term>
+<listitem><para>
+Debug information. The hotspot cache is being destroyed.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_CACHE_DISABLE">
+<term>DATASRC_CACHE_DISABLE disabling the cache</term>
+<listitem><para>
+The hotspot cache is disabled from now on. It is not going to store
+information or return anything.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_CACHE_ENABLE">
+<term>DATASRC_CACHE_ENABLE enabling the cache</term>
+<listitem><para>
+The hotspot cache is enabled from now on.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_CACHE_EXPIRED">
+<term>DATASRC_CACHE_EXPIRED the item '%1' is expired</term>
+<listitem><para>
+Debug information. There was an attempt to look up an item in the hotspot
+cache. The item was actually there, but it was too old, so it was removed
+instead and nothing is reported (the external behaviour is the same as with
+CACHE_NOT_FOUND).
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_CACHE_FOUND">
+<term>DATASRC_CACHE_FOUND the item '%1' was found</term>
+<listitem><para>
+Debug information. An item was successfully looked up in the hotspot cache.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_CACHE_FULL">
+<term>DATASRC_CACHE_FULL cache is full, dropping oldest</term>
+<listitem><para>
+Debug information. After inserting an item into the hotspot cache, the
+maximum number of items was exceeded, so the least recently used item will
+be dropped. This should be directly followed by CACHE_REMOVE.
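+</para><para>
+The drop-oldest behaviour can be pictured with a small sketch (purely
+illustrative; this is not the hotspot cache implementation, and the names
+are invented):
+</para><screen>
+from collections import OrderedDict
+
+# Sketch of an LRU-style cache that drops the least recently used item
+# once a maximum number of slots is exceeded (0 meaning no limit, as
+# described for DATASRC_CACHE_SLOTS).
+class HotspotCacheSketch:
+    def __init__(self, slots):
+        self.slots = slots
+        self.items = OrderedDict()
+
+    def insert(self, key, value):
+        self.items[key] = value
+        self.items.move_to_end(key)          # count as most recently used
+        if self.slots and len(self.items) > self.slots:
+            self.items.popitem(last=False)   # drop the least recently used
+</screen><para>
+Dropping the item is what the subsequent CACHE_REMOVE message reports.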
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_CACHE_INSERT">
+<term>DATASRC_CACHE_INSERT inserting item '%1' into the cache</term>
+<listitem><para>
+Debug information. It means a new item is being inserted into the hotspot
+cache.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_CACHE_NOT_FOUND">
+<term>DATASRC_CACHE_NOT_FOUND the item '%1' was not found</term>
+<listitem><para>
+Debug information. It was attempted to look up an item in the hotspot cache,
+but it is not there.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_CACHE_OLD_FOUND">
+<term>DATASRC_CACHE_OLD_FOUND older instance of cache item found, replacing</term>
+<listitem><para>
+Debug information. While inserting an item into the hotspot cache, an older
+instance of an item with the same name was found. The old instance will be
+removed. This should be directly followed by CACHE_REMOVE.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_CACHE_REMOVE">
+<term>DATASRC_CACHE_REMOVE removing '%1' from the cache</term>
+<listitem><para>
+Debug information. An item is being removed from the hotspot cache.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_CACHE_SLOTS">
+<term>DATASRC_CACHE_SLOTS setting the cache size to '%1', dropping '%2' items</term>
+<listitem><para>
+The maximum allowed number of items in the hotspot cache is set to the given
+number. If there are too many, some of them will be dropped. A size of 0
+means no limit.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_DO_QUERY">
+<term>DATASRC_DO_QUERY handling query for '%1/%2'</term>
+<listitem><para>
+Debug information. We're processing an internal query for the given name and
+type.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_MEM_ADD_RRSET">
+<term>DATASRC_MEM_ADD_RRSET adding RRset '%1/%2' into zone '%3'</term>
+<listitem><para>
+Debug information. An RRset is being added to the in-memory data source.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_MEM_ADD_WILDCARD">
+<term>DATASRC_MEM_ADD_WILDCARD adding wildcards for '%1'</term>
+<listitem><para>
+Debug information. Special marks above each * in a wildcard name are needed;
+they are being added now for this name.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_MEM_ADD_ZONE">
+<term>DATASRC_MEM_ADD_ZONE adding zone '%1/%2'</term>
+<listitem><para>
+Debug information. A zone is being added into the in-memory data source.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_MEM_ANY_SUCCESS">
+<term>DATASRC_MEM_ANY_SUCCESS ANY query for '%1' successful</term>
+<listitem><para>
+Debug information. The domain was found and an ANY type query is being answered
+by providing everything found inside the domain.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_MEM_CNAME">
+<term>DATASRC_MEM_CNAME CNAME at the domain '%1'</term>
+<listitem><para>
+Debug information. The requested domain is an alias to a different domain,
+returning the CNAME instead.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_MEM_CNAME_COEXIST">
+<term>DATASRC_MEM_CNAME_COEXIST can't add data to CNAME in domain '%1'</term>
+<listitem><para>
+This is the same problem as in MEM_CNAME_TO_NONEMPTY, but it happened the
+other way around -- adding some other data to a CNAME.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_MEM_CNAME_TO_NONEMPTY">
+<term>DATASRC_MEM_CNAME_TO_NONEMPTY can't add CNAME to domain with other data in '%1'</term>
+<listitem><para>
+Someone or something tried to add a CNAME into a domain that already contains
+some other data. But the protocol forbids coexistence of CNAME with anything
+(RFC 1034, section 3.6.2). This indicates a problem with provided data.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_MEM_CREATE">
+<term>DATASRC_MEM_CREATE creating zone '%1' in '%2' class</term>
+<listitem><para>
+Debug information. A representation of a zone for the in-memory data source is
+being created.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_MEM_DELEG_FOUND">
+<term>DATASRC_MEM_DELEG_FOUND delegation found at '%1'</term>
+<listitem><para>
+Debug information. A delegation point was found above the requested record.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_MEM_DESTROY">
+<term>DATASRC_MEM_DESTROY destroying zone '%1' in '%2' class</term>
+<listitem><para>
+Debug information. A zone from in-memory data source is being destroyed.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_MEM_DNAME_ENCOUNTERED">
+<term>DATASRC_MEM_DNAME_ENCOUNTERED encountered a DNAME</term>
+<listitem><para>
+Debug information. While searching for the requested domain, a DNAME was
+encountered on the way. This may lead to redirection to a different domain and
+stop the search.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_MEM_DNAME_FOUND">
+<term>DATASRC_MEM_DNAME_FOUND DNAME found at '%1'</term>
+<listitem><para>
+Debug information. A DNAME was found instead of the requested information.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_MEM_DNAME_NS">
+<term>DATASRC_MEM_DNAME_NS dNAME and NS can't coexist in non-apex domain '%1'</term>
+<listitem><para>
+A request was made to put DNAME and NS records into the same domain, which
+is not the apex (the top of the zone). This is forbidden by RFC 2672,
+section 3. This indicates a problem with the provided data.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_MEM_DOMAIN_EMPTY">
+<term>DATASRC_MEM_DOMAIN_EMPTY requested domain '%1' is empty</term>
+<listitem><para>
+Debug information. The requested domain exists in the tree of domains, but
+it is empty. Therefore it doesn't contain the requested resource type.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_MEM_DUP_RRSET">
+<term>DATASRC_MEM_DUP_RRSET duplicate RRset '%1/%2'</term>
+<listitem><para>
+An RRset is being inserted into the in-memory data source for a second time. The
+original version must be removed first. Note that loading master files where an
+RRset is split into multiple locations is not supported yet.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_MEM_EXACT_DELEGATION">
+<term>DATASRC_MEM_EXACT_DELEGATION delegation at the exact domain '%1'</term>
+<listitem><para>
+Debug information. There's an NS record at the requested domain. This means
+this zone is not authoritative for the requested domain, but a delegation
+should be followed. The requested domain is the apex of some other zone.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_MEM_FIND">
+<term>DATASRC_MEM_FIND find '%1/%2'</term>
+<listitem><para>
+Debug information. A search for the requested RRset is being started.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_MEM_FIND_ZONE">
+<term>DATASRC_MEM_FIND_ZONE looking for zone '%1'</term>
+<listitem><para>
+Debug information. A zone object for this zone is being searched for in the
+in-memory data source.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_MEM_LOAD">
+<term>DATASRC_MEM_LOAD loading zone '%1' from file '%2'</term>
+<listitem><para>
+Debug information. The content of the master file is being loaded into memory.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_MEM_NOTFOUND">
+<term>DATASRC_MEM_NOTFOUND requested domain '%1' not found</term>
+<listitem><para>
+Debug information. The requested domain does not exist.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_MEM_NS_ENCOUNTERED">
+<term>DATASRC_MEM_NS_ENCOUNTERED encountered a NS</term>
+<listitem><para>
+Debug information. While searching for the requested domain, an NS record was
+encountered on the way (a delegation). This may cause the search to stop.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_MEM_NXRRSET">
+<term>DATASRC_MEM_NXRRSET no such type '%1' at '%2'</term>
+<listitem><para>
+Debug information. The domain exists, but it doesn't hold any record of the
+requested type.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_MEM_OUT_OF_ZONE">
+<term>DATASRC_MEM_OUT_OF_ZONE domain '%1' doesn't belong to zone '%2'</term>
+<listitem><para>
+An attempt was made to add the domain into a zone that shouldn't have it
+(e.g. the domain is not a subdomain of the zone origin). This indicates a
+problem with the provided data.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_MEM_RENAME">
+<term>DATASRC_MEM_RENAME renaming RRset from '%1' to '%2'</term>
+<listitem><para>
+Debug information. An RRset is being generated from a different RRset (most
+probably a wildcard). So it must be renamed to whatever the user asked for. In
+fact, it's impossible to rename RRsets with our libraries, so a new one is
+created and all resource records are copied over.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_MEM_SINGLETON">
+<term>DATASRC_MEM_SINGLETON trying to add multiple RRs for domain '%1' and type '%2'</term>
+<listitem><para>
+Some resource types are singletons -- only one is allowed in a domain
+(for example CNAME or SOA). This indicates a problem with provided data.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_MEM_SUCCESS">
+<term>DATASRC_MEM_SUCCESS query for '%1/%2' successful</term>
+<listitem><para>
+Debug information. The requested record was found.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_MEM_SUPER_STOP">
+<term>DATASRC_MEM_SUPER_STOP stopped at superdomain '%1', domain '%2' is empty</term>
+<listitem><para>
+Debug information. The search stopped at a superdomain of the requested
+domain. The domain is an empty nonterminal, therefore it is treated as an
+NXRRSET case (e.g. the domain exists, but it doesn't have the requested
+record type).
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_MEM_SWAP">
+<term>DATASRC_MEM_SWAP swapping contents of two zone representations ('%1' and '%2')</term>
+<listitem><para>
+Debug information. The contents of two in-memory zones are being exchanged.
+This is the usual practice for performing some manipulation in an exception-safe
+manner -- the new data are prepared in a different zone object and, when that
+succeeds, the contents are swapped. The original object then holds the new data
+and the other one can be safely destroyed.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_MEM_WILDCARD_CANCEL">
+<term>DATASRC_MEM_WILDCARD_CANCEL wildcard match canceled for '%1'</term>
+<listitem><para>
+Debug information. A domain above a wildcard was reached, but there's something
+below the requested domain. Therefore the wildcard doesn't apply here. This
+behaviour is specified by RFC 1034, section 4.3.3.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_MEM_WILDCARD_DNAME">
+<term>DATASRC_MEM_WILDCARD_DNAME dNAME record in wildcard domain '%1'</term>
+<listitem><para>
+The software refuses to load DNAME records into a wildcard domain. It isn't
+explicitly forbidden, but the protocol is ambiguous about how this should
+behave and BIND 9 refuses that as well. Please describe your intention using
+different tools.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_MEM_WILDCARD_NS">
+<term>DATASRC_MEM_WILDCARD_NS nS record in wildcard domain '%1'</term>
+<listitem><para>
+The software refuses to load NS records into a wildcard domain. It isn't
+explicitly forbidden, but the protocol is ambiguous about how this should
+behave and BIND 9 refuses that as well. Please describe your intention using
+different tools.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_META_ADD">
+<term>DATASRC_META_ADD adding a data source into meta data source</term>
+<listitem><para>
+Debug information. Yet another data source is being added into the meta data
+source (probably at startup or reconfiguration).
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_META_ADD_CLASS_MISMATCH">
+<term>DATASRC_META_ADD_CLASS_MISMATCH mismatch between classes '%1' and '%2'</term>
+<listitem><para>
+An attempt was made to add a data source into a meta data source, but their
+classes do not match.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_META_REMOVE">
+<term>DATASRC_META_REMOVE removing data source from meta data source</term>
+<listitem><para>
+Debug information. A data source is being removed from meta data source.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_ADD_NSEC">
+<term>DATASRC_QUERY_ADD_NSEC adding NSEC record for '%1'</term>
+<listitem><para>
+Debug information. An NSEC record covering this zone is being added.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_ADD_NSEC3">
+<term>DATASRC_QUERY_ADD_NSEC3 adding NSEC3 record of zone '%1'</term>
+<listitem><para>
+Debug information. An NSEC3 record for the given zone is being added to the
+response message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_ADD_RRSET">
+<term>DATASRC_QUERY_ADD_RRSET adding RRset '%1/%2' to message</term>
+<listitem><para>
+Debug information. An RRset is being added to the response message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_ADD_SOA">
+<term>DATASRC_QUERY_ADD_SOA adding SOA of '%1'</term>
+<listitem><para>
+Debug information. An SOA record of the given zone is being added to the
+authority section of the response message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_AUTH_FAIL">
+<term>DATASRC_QUERY_AUTH_FAIL the underlying data source failed with %1</term>
+<listitem><para>
+The underlying data source failed to answer the authoritative query. 1 means
+some error, 2 is not implemented. The data source should have logged the
+specific error already.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_BAD_REFERRAL">
+<term>DATASRC_QUERY_BAD_REFERRAL bad referral to '%1'</term>
+<listitem><para>
+The domain lives in another zone, but it is not possible to generate referral
+information for it.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_CACHED">
+<term>DATASRC_QUERY_CACHED data for %1/%2 found in cache</term>
+<listitem><para>
+Debug information. The requested data were found in the hotspot cache, so
+no query is sent to the real data source.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_CHECK_CACHE">
+<term>DATASRC_QUERY_CHECK_CACHE checking cache for '%1/%2'</term>
+<listitem><para>
+Debug information. While processing a query, a lookup in the hotspot cache
+is being made.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_COPY_AUTH">
+<term>DATASRC_QUERY_COPY_AUTH copying authoritative section into message</term>
+<listitem><para>
+Debug information. The whole referral information is being copied into the
+response message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_DELEGATION">
+<term>DATASRC_QUERY_DELEGATION looking for delegation on the path to '%1'</term>
+<listitem><para>
+Debug information. The software is trying to identify delegation points on the
+way down to the given domain.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_EMPTY_CNAME">
+<term>DATASRC_QUERY_EMPTY_CNAME cNAME at '%1' is empty</term>
+<listitem><para>
+A CNAME was found and was being followed, but it contains no records, so
+there's nowhere to go and there will be no answer. This indicates a problem
+with the supplied data.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_EMPTY_DNAME">
+<term>DATASRC_QUERY_EMPTY_DNAME the DNAME on '%1' is empty</term>
+<listitem><para>
+During an attempt to synthesize a CNAME from this DNAME, it was discovered that
+the DNAME is empty (it has no records). This indicates a problem with the
+supplied data.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_FAIL">
+<term>DATASRC_QUERY_FAIL query failed</term>
+<listitem><para>
+Some subtask of query processing failed. The reason should have been reported
+already. We are returning SERVFAIL.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_FOLLOW_CNAME">
+<term>DATASRC_QUERY_FOLLOW_CNAME following CNAME at '%1'</term>
+<listitem><para>
+Debug information. The domain is a CNAME (or a DNAME and we created a CNAME
+for it already), so it's being followed.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_GET_MX_ADDITIONAL">
+<term>DATASRC_QUERY_GET_MX_ADDITIONAL addition of A/AAAA for '%1' requested by MX '%2'</term>
+<listitem><para>
+Debug information. While processing a query, an MX record was encountered. It
+references the mentioned name, so A/AAAA records for it are looked up
+and put into the additional section.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_GET_NS_ADDITIONAL">
+<term>DATASRC_QUERY_GET_NS_ADDITIONAL addition of A/AAAA for '%1' requested by NS '%2'</term>
+<listitem><para>
+Debug information. While processing a query, an NS record was encountered. It
+references the mentioned name, so A/AAAA records for it are looked up
+and put into the additional section.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_GLUE_FAIL">
+<term>DATASRC_QUERY_GLUE_FAIL the underlying data source failed with %1</term>
+<listitem><para>
+The underlying data source failed to answer the glue query. 1 means some error,
+2 is not implemented. The data source should have logged the specific error
+already.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_INVALID_OP">
+<term>DATASRC_QUERY_INVALID_OP invalid query operation requested</term>
+<listitem><para>
+This indicates a programmer error. DO_QUERY was called with an unknown
+operation code.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_IS_AUTH">
+<term>DATASRC_QUERY_IS_AUTH auth query (%1/%2)</term>
+<listitem><para>
+Debug information. The last DO_QUERY is an auth query.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_IS_GLUE">
+<term>DATASRC_QUERY_IS_GLUE glue query (%1/%2)</term>
+<listitem><para>
+Debug information. The last DO_QUERY is a query for glue addresses.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_IS_NOGLUE">
+<term>DATASRC_QUERY_IS_NOGLUE query for non-glue addresses (%1/%2)</term>
+<listitem><para>
+Debug information. The last DO_QUERY is a query for addresses that are not
+glue.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_IS_REF">
+<term>DATASRC_QUERY_IS_REF query for referral (%1/%2)</term>
+<listitem><para>
+Debug information. The last DO_QUERY is a query for referral information.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_IS_SIMPLE">
+<term>DATASRC_QUERY_IS_SIMPLE simple query (%1/%2)</term>
+<listitem><para>
+Debug information. The last DO_QUERY is a simple query.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_MISPLACED_TASK">
+<term>DATASRC_QUERY_MISPLACED_TASK task of this type should not be here</term>
+<listitem><para>
+This indicates a programming error. A task was found in the internal task
+queue, but this kind of task wasn't designed to be inside the queue (it should
+be handled right away, not queued).
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_MISSING_NS">
+<term>DATASRC_QUERY_MISSING_NS missing NS records for '%1'</term>
+<listitem><para>
+NS records should have been put into the authority section. However, this zone
+has none. This indicates a problem with the provided data.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_MISSING_SOA">
+<term>DATASRC_QUERY_MISSING_SOA the zone '%1' has no SOA</term>
+<listitem><para>
+The answer should have been a negative one (e.g. indicating the nonexistence of
+something). To do so, an SOA record should be put into the authority section,
+but the zone does not have one. This indicates a problem with the provided data.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_NOGLUE_FAIL">
+<term>DATASRC_QUERY_NOGLUE_FAIL the underlying data source failed with %1</term>
+<listitem><para>
+The underlying data source failed to answer the no-glue query. 1 means some
+error, 2 is not implemented. The data source should have logged the specific
+error already.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_NO_CACHE_ANY_AUTH">
+<term>DATASRC_QUERY_NO_CACHE_ANY_AUTH ignoring cache for ANY query (%1/%2 in %3 class)</term>
+<listitem><para>
+Debug information. The hotspot cache is ignored for authoritative ANY queries
+for consistency reasons.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_NO_CACHE_ANY_SIMPLE">
+<term>DATASRC_QUERY_NO_CACHE_ANY_SIMPLE ignoring cache for ANY query (%1/%2 in %3 class)</term>
+<listitem><para>
+Debug information. The hotspot cache is ignored for ANY queries for consistency
+reasons.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_NO_DS_NSEC">
+<term>DATASRC_QUERY_NO_DS_NSEC there's no DS record in the '%1' zone</term>
+<listitem><para>
+An attempt to add an NSEC record into the message failed, because the zone does
+not have any DS record. This indicates a problem with the provided data.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_NO_DS_NSEC3">
+<term>DATASRC_QUERY_NO_DS_NSEC3 there's no DS record in the '%1' zone</term>
+<listitem><para>
+An attempt to add an NSEC3 record into the message failed, because the zone does
+not have any DS record. This indicates a problem with the provided data.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_NO_ZONE">
+<term>DATASRC_QUERY_NO_ZONE no zone containing '%1' in class '%2'</term>
+<listitem><para>
+Lookup of the domain failed because the data source has no zone that contains
+the domain. Maybe someone sent a query to the wrong server for some reason.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_PROCESS">
+<term>DATASRC_QUERY_PROCESS processing query '%1/%2' in the '%3' class</term>
+<listitem><para>
+Debug information. A query is being processed now.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_PROVENX_FAIL">
+<term>DATASRC_QUERY_PROVENX_FAIL unable to prove nonexistence of '%1'</term>
+<listitem><para>
+The user wants DNSSEC and we discovered that the entity doesn't exist (either
+the domain or the record), but there was an error getting an NSEC/NSEC3 record
+to prove the nonexistence.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_REF_FAIL">
+<term>DATASRC_QUERY_REF_FAIL the underlying data source failed with %1</term>
+<listitem><para>
+The underlying data source failed to answer the query for referral information.
+1 means some error, 2 is not implemented. The data source should have logged
+the specific error already.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_RRSIG">
+<term>DATASRC_QUERY_RRSIG unable to answer RRSIG query</term>
+<listitem><para>
+The server is unable to answer a direct query for RRSIG type, but was asked
+to do so.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_SIMPLE_FAIL">
+<term>DATASRC_QUERY_SIMPLE_FAIL the underlying data source failed with %1</term>
+<listitem><para>
+The underlying data source failed to answer the simple query. 1 means some
+error, 2 is not implemented. The data source should have logged the specific
+error already.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_SYNTH_CNAME">
+<term>DATASRC_QUERY_SYNTH_CNAME synthesizing CNAME from DNAME on '%1'</term>
+<listitem><para>
+Debug information. While answering a query, a DNAME was met. The DNAME itself
+will be returned, but along with it a CNAME for clients which don't understand
+DNAMEs will be synthesized.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_TASK_FAIL">
+<term>DATASRC_QUERY_TASK_FAIL task failed with %1</term>
+<listitem><para>
+The query subtask failed. The reason should have been reported by the subtask
+already. The code is 1 for error, 2 for not implemented.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_TOO_MANY_CNAMES">
+<term>DATASRC_QUERY_TOO_MANY_CNAMES cNAME chain limit exceeded at '%1'</term>
+<listitem><para>
+A CNAME led to another CNAME and it led to another, and so on. After 16
+CNAMEs, the software gave up. Long CNAME chains are discouraged, and this
+might possibly be a loop as well. Note that some of the CNAMEs might have
+been synthesized from DNAMEs. This indicates a problem with the supplied data.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_UNKNOWN_RESULT">
+<term>DATASRC_QUERY_UNKNOWN_RESULT unknown result of subtask</term>
+<listitem><para>
+This indicates a programmer error. The answer from the subtask doesn't look like
+anything known.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_WILDCARD">
+<term>DATASRC_QUERY_WILDCARD looking for a wildcard covering '%1'</term>
+<listitem><para>
+Debug information. A direct match wasn't found, so a wildcard covering the
+domain is being looked for now.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_WILDCARD_FAIL">
+<term>DATASRC_QUERY_WILDCARD_FAIL error processing wildcard for '%1'</term>
+<listitem><para>
+During an attempt to cover the domain by a wildcard, an error happened. The
+exact cause should have been reported already.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_WILDCARD_PROVENX_FAIL">
+<term>DATASRC_QUERY_WILDCARD_PROVENX_FAIL unable to prove nonexistence of '%1' (%2)</term>
+<listitem><para>
+While processing a wildcard, it wasn't possible to prove nonexistence of the
+given domain or record. The code is 1 for error and 2 for not implemented.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_WILDCARD_REFERRAL">
+<term>DATASRC_QUERY_WILDCARD_REFERRAL unable to find referral info for '%1' (%2)</term>
+<listitem><para>
+While processing a wildcard, a referral was met. But it wasn't possible to get
+enough information for it. The code is 1 for error, 2 for not implemented.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_SQLITE_CLOSE">
+<term>DATASRC_SQLITE_CLOSE closing SQLite database</term>
+<listitem><para>
+Debug information. The SQLite data source is closing the database file.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_SQLITE_CREATE">
+<term>DATASRC_SQLITE_CREATE sQLite data source created</term>
+<listitem><para>
+Debug information. An instance of SQLite data source is being created.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_SQLITE_DESTROY">
+<term>DATASRC_SQLITE_DESTROY sQLite data source destroyed</term>
+<listitem><para>
+Debug information. An instance of SQLite data source is being destroyed.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_SQLITE_ENCLOSURE">
+<term>DATASRC_SQLITE_ENCLOSURE looking for zone containing '%1'</term>
+<listitem><para>
+Debug information. The SQLite data source is trying to identify which zone
+should hold this domain.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_SQLITE_ENCLOSURE_NOTFOUND">
+<term>DATASRC_SQLITE_ENCLOSURE_NOTFOUND no zone contains it</term>
+<listitem><para>
+Debug information. The last SQLITE_ENCLOSURE query was unsuccessful; there's
+no such zone in our data.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_SQLITE_FIND">
+<term>DATASRC_SQLITE_FIND looking for RRset '%1/%2'</term>
+<listitem><para>
+Debug information. The SQLite data source is looking up a resource record
+set.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_SQLITE_FINDADDRS">
+<term>DATASRC_SQLITE_FINDADDRS looking for A/AAAA addresses for '%1'</term>
+<listitem><para>
+Debug information. The data source is looking up the addresses for the given
+domain name.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_SQLITE_FINDADDRS_BAD_CLASS">
+<term>DATASRC_SQLITE_FINDADDRS_BAD_CLASS class mismatch looking for addresses ('%1' and '%2')</term>
+<listitem><para>
+The SQLite data source was looking up A/AAAA addresses, but the data source
+contains a different class than the query was for.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_SQLITE_FINDEXACT">
+<term>DATASRC_SQLITE_FINDEXACT looking for exact RRset '%1/%2'</term>
+<listitem><para>
+Debug information. The SQLite data source is looking up an exact resource
+record.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_SQLITE_FINDEXACT_BAD_CLASS">
+<term>DATASRC_SQLITE_FINDEXACT_BAD_CLASS class mismatch looking for an RRset ('%1' and '%2')</term>
+<listitem><para>
+The SQLite data source was looking up an exact RRset, but the data source
+contains a different class than the query was for.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_SQLITE_FINDREC">
+<term>DATASRC_SQLITE_FINDREC looking for record '%1/%2'</term>
+<listitem><para>
+Debug information. The SQLite data source is looking up records of the given name
+and type in the database.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_SQLITE_FINDREF">
+<term>DATASRC_SQLITE_FINDREF looking for referral at '%1'</term>
+<listitem><para>
+Debug information. The SQLite data source is identifying whether this domain is
+a referral and where it goes.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_SQLITE_FINDREF_BAD_CLASS">
+<term>DATASRC_SQLITE_FINDREF_BAD_CLASS class mismatch looking for referral ('%1' and '%2')</term>
+<listitem><para>
+The SQLite data source was trying to identify whether there's a referral, but
+it contains a different class than the query was for.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_SQLITE_FIND_BAD_CLASS">
+<term>DATASRC_SQLITE_FIND_BAD_CLASS class mismatch looking for an RRset ('%1' and '%2')</term>
+<listitem><para>
+The SQLite data source was looking up an RRset, but the data source contains
+a different class than the query was for.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_SQLITE_FIND_NSEC3">
+<term>DATASRC_SQLITE_FIND_NSEC3 looking for NSEC3 in zone '%1' for hash '%2'</term>
+<listitem><para>
+Debug information. We're trying to look up an NSEC3 record in the SQLite data
+source.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_SQLITE_FIND_NSEC3_NO_ZONE">
+<term>DATASRC_SQLITE_FIND_NSEC3_NO_ZONE no such zone '%1'</term>
+<listitem><para>
+The SQLite data source was asked to provide an NSEC3 record for the given zone,
+but it doesn't contain that zone.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_SQLITE_OPEN">
+<term>DATASRC_SQLITE_OPEN opening SQLite database '%1'</term>
+<listitem><para>
+Debug information. The SQLite data source is loading an SQLite database in
+the provided file.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_SQLITE_PREVIOUS">
+<term>DATASRC_SQLITE_PREVIOUS looking for name previous to '%1'</term>
+<listitem><para>
+Debug information. We're trying to look up the name preceding the supplied one.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_SQLITE_PREVIOUS_NO_ZONE">
+<term>DATASRC_SQLITE_PREVIOUS_NO_ZONE no zone containing '%1'</term>
+<listitem><para>
+The SQLite data source tried to identify the name preceding this one, but this
+name is not contained in any zone in the data source.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_SQLITE_SETUP">
+<term>DATASRC_SQLITE_SETUP setting up SQLite database</term>
+<listitem><para>
+The database for the SQLite data source was found empty. It is assumed this is
+the first run, so it is being initialized with the current schema. It will still
+contain no data, but it will be ready for use.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_STATIC_BAD_CLASS">
+<term>DATASRC_STATIC_BAD_CLASS static data source can handle CH only</term>
+<listitem><para>
+For some reason, someone asked the static data source a query that is not in
+the CH class.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_STATIC_CREATE">
+<term>DATASRC_STATIC_CREATE creating the static datasource</term>
+<listitem><para>
+Debug information. The static data source (the one holding stuff like
+version.bind) is being created.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_STATIC_FIND">
+<term>DATASRC_STATIC_FIND looking for '%1/%2'</term>
+<listitem><para>
+Debug information. This resource record set is being looked up in the static
+data source.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_UNEXPECTED_QUERY_STATE">
+<term>DATASRC_UNEXPECTED_QUERY_STATE unexpected query state</term>
+<listitem><para>
+This indicates a programming error. An internal task of unknown type was
+generated.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="LOGIMPL_ABOVEDBGMAX">
+<term>LOGIMPL_ABOVEDBGMAX debug level of %1 is too high and will be set to the maximum of %2</term>
+<listitem><para>
+A message from the underlying logger implementation code: the debug level
+(as set by the string DEBUGn) is above the maximum allowed value and has
+been reduced to that value.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="LOGIMPL_BADDEBUG">
+<term>LOGIMPL_BADDEBUG debug string is '%1': must be of the form DEBUGn</term>
+<listitem><para>
+The string indicating the extended logging level (used by the underlying
+logger implementation code) is not of the stated form. In particular,
+it starts DEBUG but does not end with an integer.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="LOGIMPL_BELOWDBGMIN">
+<term>LOGIMPL_BELOWDBGMIN debug level of %1 is too low and will be set to the minimum of %2</term>
+<listitem><para>
+A message from the underlying logger implementation code: the debug level
+(as set by the string DEBUGn) is below the minimum allowed value and has
+been increased to that value.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="MSG_BADDESTINATION">
+<term>MSG_BADDESTINATION unrecognized log destination: %1</term>
+<listitem><para>
+A logger destination value was given that was not recognized. The
+destination should be one of "console", "file", or "syslog".
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="MSG_BADSEVERITY">
+<term>MSG_BADSEVERITY unrecognized log severity: %1</term>
+<listitem><para>
+A logger severity value was given that was not recognized. The severity
+should be one of "DEBUG", "INFO", "WARN", "ERROR", or "FATAL".
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="MSG_BADSTREAM">
+<term>MSG_BADSTREAM bad log console output stream: %1</term>
+<listitem><para>
+A log console output stream was given that was not recognized. The
+output stream should be one of "stdout", or "stderr"
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="MSG_DUPLNS">
+<term>MSG_DUPLNS line %1: duplicate $NAMESPACE directive found</term>
+<listitem><para>
+When reading a message file, more than one $NAMESPACE directive was found. In
+this version of the code, such a condition is regarded as an error and the
+read will be abandoned.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="MSG_DUPMSGID">
+<term>MSG_DUPMSGID duplicate message ID (%1) in compiled code</term>
+<listitem><para>
+Indicative of a programming error: when it started up, BIND10 detected that
+the given message ID had been registered by more than one module. (All message
+IDs should be unique throughout BIND10.) This has no impact on the operation
+of the server other than that erroneous messages may be logged. (When BIND10
+loads the message IDs and their associated text, if a duplicate ID is found it
+is discarded. However, when the module that supplied the duplicate ID logs that
+particular message, the text supplied by the module that added the original
+ID will be output - something that may bear no relation to the condition being
+logged.)
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="MSG_IDNOTFND">
+<term>MSG_IDNOTFND could not replace message text for '%1': no such message</term>
+<listitem><para>
+During start-up a local message file was read. A line with the listed
+message identification was found in the file, but the identification is not
+one contained in the compiled-in message dictionary. Either the message
+identification has been mis-spelled in the file, or the local file was used
+for an earlier version of the software and the message with that
+identification has been removed.
+</para><para>
+This message may appear a number of times in the file, once for every such
+unknown message identification.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="MSG_INVMSGID">
+<term>MSG_INVMSGID line %1: invalid message identification '%2'</term>
+<listitem><para>
+The concatenation of the prefix and the message identification is used as
+a symbol in the C++ module; as such it may only contain alphanumeric
+characters and underscores.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="MSG_NOMSGID">
+<term>MSG_NOMSGID line %1: message definition line found without a message ID</term>
+<listitem><para>
+Message definition lines are lines starting with a "%". The rest of the line
+should comprise the message ID and text describing the message. This error
+indicates the message compiler found a line in the message file comprising
+just the "%" and nothing else.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="MSG_NOMSGTXT">
+<term>MSG_NOMSGTXT line %1: line found containing a message ID ('%2') and no text</term>
+<listitem><para>
+Message definition lines are lines starting with a "%". The rest of the line
+should comprise the message ID and text describing the message. This error
+is generated when a line is found in the message file that contains the
+leading "%" and the message identification but no text.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="MSG_NSEXTRARG">
+<term>MSG_NSEXTRARG line %1: $NAMESPACE directive has too many arguments</term>
+<listitem><para>
+The $NAMESPACE directive takes a single argument, a namespace in which all the
+generated symbol names are placed. This error is generated when the
+compiler finds a $NAMESPACE directive with more than one argument.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="MSG_NSINVARG">
+<term>MSG_NSINVARG line %1: $NAMESPACE directive has an invalid argument ('%2')</term>
+<listitem><para>
+The $NAMESPACE argument should be a valid C++ namespace. The reader does a
+cursory check on its validity, checking that the characters in the namespace
+are correct. The error is generated when the reader finds an invalid
+character. (Valid are alphanumeric characters, underscores and colons.)
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="MSG_NSNOARG">
+<term>MSG_NSNOARG line %1: no arguments were given to the $NAMESPACE directive</term>
+<listitem><para>
+The $NAMESPACE directive takes a single argument, a namespace in which all the
+generated symbol names are placed. This error is generated when the
+compiler finds a $NAMESPACE directive with no arguments.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="MSG_OPENIN">
+<term>MSG_OPENIN unable to open message file %1 for input: %2</term>
+<listitem><para>
+The program was not able to open the specified input message file for the
+reason given.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="MSG_OPENOUT">
+<term>MSG_OPENOUT unable to open %1 for output: %2</term>
+<listitem><para>
+The program was not able to open the specified output file for the reason
+given.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="MSG_PRFEXTRARG">
+<term>MSG_PRFEXTRARG line %1: $PREFIX directive has too many arguments</term>
+<listitem><para>
+The $PREFIX directive takes a single argument, a prefix to be added to the
+symbol names when a C++ .h file is created. This error is generated when the
+compiler finds a $PREFIX directive with more than one argument.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="MSG_PRFINVARG">
+<term>MSG_PRFINVARG line %1: $PREFIX directive has an invalid argument ('%2')</term>
+<listitem><para>
+The $PREFIX argument is used in a symbol name in a C++ header file. As such,
+it must adhere to restrictions on C++ symbol names (e.g. may only contain
+alphanumeric characters or underscores, and may not start with a digit).
+A $PREFIX directive was found with an argument (given in the message) that
+violates those restrictions.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="MSG_RDLOCMES">
+<term>MSG_RDLOCMES reading local message file %1</term>
+<listitem><para>
+This is an informational message output by BIND10 when it starts to read a
+local message file. (A local message file may replace the text of one or more
+messages; the ID of the message will not be changed though.)
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="MSG_READERR">
+<term>MSG_READERR error reading from message file %1: %2</term>
+<listitem><para>
+The specified error was encountered reading from the named message file.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="MSG_UNRECDIR">
+<term>MSG_UNRECDIR line %1: unrecognised directive '%2'</term>
+<listitem><para>
+A line starting with a dollar symbol was found, but the first word on the line
+(shown in the message) was not a recognised message compiler directive.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="MSG_WRITERR">
+<term>MSG_WRITERR error writing to %1: %2</term>
+<listitem><para>
+The specified error was encountered by the message compiler when writing to
+the named output file.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="NSAS_INVRESPSTR">
+<term>NSAS_INVRESPSTR queried for %1 but got invalid response</term>
+<listitem><para>
+This message indicates an internal error in the nameserver address store
+component (NSAS) of the resolver. The NSAS made a query for an RR for the
+specified nameserver but received an invalid response. Either the success
+function was called without a DNS message or the message was invalid in some
+way. (In the latter case, the error should have been picked up elsewhere in
+the processing logic, hence the raising of the error here.)
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="NSAS_INVRESPTC">
+<term>NSAS_INVRESPTC queried for %1 RR of type/class %2/%3, received response %4/%5</term>
+<listitem><para>
+This message indicates an internal error in the nameserver address store
+component (NSAS) of the resolver. The NSAS made a query for the given RR
+type and class, but instead received an answer with the given type and class.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="NSAS_LOOKUPCANCEL">
+<term>NSAS_LOOKUPCANCEL lookup for zone %1 has been cancelled</term>
+<listitem><para>
+A debug message, this is output when a NSAS (nameserver address store -
+part of the resolver) lookup for a zone has been cancelled.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="NSAS_LOOKUPZONE">
+<term>NSAS_LOOKUPZONE searching NSAS for nameservers for zone %1</term>
+<listitem><para>
+A debug message, this is output when a call is made to the nameserver address
+store (part of the resolver) to obtain the nameservers for the specified zone.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="NSAS_NSADDR">
+<term>NSAS_NSADDR asking resolver to obtain A and AAAA records for %1</term>
+<listitem><para>
+A debug message, the NSAS (nameserver address store - part of the resolver) is
+making a callback into the resolver to retrieve the address records for the
+specified nameserver.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="NSAS_NSLKUPFAIL">
+<term>NSAS_NSLKUPFAIL failed to lookup any %1 for %2</term>
+<listitem><para>
+A debug message, the NSAS (nameserver address store - part of the resolver)
+has been unable to retrieve the specified resource record for the specified
+nameserver. This is not necessarily a problem - the nameserver may be
+unreachable, in which case the NSAS will try other nameservers in the zone.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="NSAS_NSLKUPSUCC">
+<term>NSAS_NSLKUPSUCC found address %1 for %2</term>
+<listitem><para>
+A debug message, the NSAS (nameserver address store - part of the resolver)
+has retrieved the given address for the specified nameserver through an
+external query.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="NSAS_SETRTT">
+<term>NSAS_SETRTT reporting RTT for %1 as %2; new value is now %3</term>
+<listitem><para>
+A NSAS (nameserver address store - part of the resolver) debug message
+reporting the round-trip time (RTT) for a query made to the specified
+nameserver. The RTT has been updated using the value given and the new RTT is
+displayed. (The RTT is subject to a calculation that damps out sudden
+changes. As a result, the new RTT is not necessarily equal to the RTT
+reported.)
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESLIB_ANSWER">
+<term>RESLIB_ANSWER answer received in response to query for <%1></term>
+<listitem><para>
+A debug message recording that an answer has been received to an upstream
+query for the specified question. Previous debug messages will have indicated
+the server to which the question was sent.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESLIB_CNAME">
+<term>RESLIB_CNAME CNAME received in response to query for <%1></term>
+<listitem><para>
+A debug message recording that a CNAME response has been received to an upstream
+query for the specified question. Previous debug messages will have indicated
+the server to which the question was sent.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESLIB_DEEPEST">
+<term>RESLIB_DEEPEST did not find <%1> in cache, deepest delegation found is %2</term>
+<listitem><para>
+A debug message, a cache lookup did not find the specified <name, class,
+type> tuple in the cache; instead, the deepest delegation found is indicated.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESLIB_FOLLOWCNAME">
+<term>RESLIB_FOLLOWCNAME following CNAME chain to <%1></term>
+<listitem><para>
+A debug message, a CNAME response was received and another query is being issued
+for the <name, class, type> tuple.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESLIB_LONGCHAIN">
+<term>RESLIB_LONGCHAIN CNAME received in response to query for <%1>: CNAME chain length exceeded</term>
+<listitem><para>
+A debug message recording that a CNAME response has been received to an upstream
+query for the specified question (Previous debug messages will have indicated
+the server to which the question was sent). However, receipt of this CNAME
+has meant that the resolver has exceeded the CNAME chain limit (a CNAME chain
+is where one CNAME points to another) and so an error is being returned.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESLIB_NONSRRSET">
+<term>RESLIB_NONSRRSET no NS RRSet in referral response received to query for <%1></term>
+<listitem><para>
+A debug message, this indicates that a response was received for the specified
+query and was categorised as a referral. However, the received message did
+not contain any NS RRsets. This may indicate a programming error in the
+response classification code.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESLIB_NSASLOOK">
+<term>RESLIB_NSASLOOK looking up nameserver for zone %1 in the NSAS</term>
+<listitem><para>
+A debug message, the RunningQuery object is querying the NSAS for the
+nameservers for the specified zone.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESLIB_NXDOMRR">
+<term>RESLIB_NXDOMRR NXDOMAIN/NXRRSET received in response to query for <%1></term>
+<listitem><para>
+A debug message recording that either an NXDOMAIN or an NXRRSET response has
+been received to an upstream query for the specified question. Previous debug
+messages will have indicated the server to which the question was sent.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESLIB_PROTOCOL">
+<term>RESLIB_PROTOCOL protocol error in answer for %1: %3</term>
+<listitem><para>
+A debug message indicating that a protocol error was received. As there
+are no retries left, an error will be reported.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESLIB_PROTOCOLRTRY">
+<term>RESLIB_PROTOCOLRTRY protocol error in answer for %1: %2 (retries left: %3)</term>
+<listitem><para>
+A debug message indicating that a protocol error was received and that
+the resolver is repeating the query to the same nameserver. After this
+repeated query, there will be the indicated number of retries left.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESLIB_RCODERR">
+<term>RESLIB_RCODERR RCODE indicates error in response to query for <%1></term>
+<listitem><para>
+A debug message, the response to the specified query indicated an error
+that is not covered by a specific code path. A SERVFAIL will be returned.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESLIB_REFERRAL">
+<term>RESLIB_REFERRAL referral received in response to query for <%1></term>
+<listitem><para>
+A debug message recording that a referral response has been received to an
+upstream query for the specified question. Previous debug messages will
+have indicated the server to which the question was sent.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESLIB_REFERZONE">
+<term>RESLIB_REFERZONE referred to zone %1</term>
+<listitem><para>
+A debug message indicating that the last referral message was to the specified
+zone.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESLIB_RESCAFND">
+<term>RESLIB_RESCAFND found <%1> in the cache (resolve() instance %2)</term>
+<listitem><para>
+This is a debug message and indicates that a RecursiveQuery object found
+the specified <name, class, type> tuple in the cache. The instance number
+at the end of the message indicates which of the two resolve() methods has
+been called.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESLIB_RESCANOTFND">
+<term>RESLIB_RESCANOTFND did not find <%1> in the cache, starting RunningQuery (resolve() instance %2)</term>
+<listitem><para>
+This is a debug message and indicates that the lookup in the cache made by the
+RecursiveQuery::resolve() method did not find an answer, so a new RunningQuery
+object has been created to resolve the question. The instance number at
+the end of the message indicates which of the two resolve() methods has
+been called.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESLIB_RESOLVE">
+<term>RESLIB_RESOLVE asked to resolve <%1> (resolve() instance %2)</term>
+<listitem><para>
+A debug message, the RecursiveQuery::resolve method has been called to resolve
+the specified <name, class, type> tuple. The first action will be to look up
+the specified tuple in the cache. The instance number at the end of the
+message indicates which of the two resolve() methods has been called.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESLIB_RRSETFND">
+<term>RESLIB_RRSETFND found single RRset in the cache when querying for <%1> (resolve() instance %2)</term>
+<listitem><para>
+A debug message, indicating that when RecursiveQuery::resolve queried the
+cache, a single RRset was found which was put in the answer. The instance
+number at the end of the message indicates which of the two resolve()
+methods has been called.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESLIB_RTT">
+<term>RESLIB_RTT round-trip time of last query calculated as %1 ms</term>
+<listitem><para>
+A debug message giving the round-trip time of the last query and response.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESLIB_RUNCAFND">
+<term>RESLIB_RUNCAFND found <%1> in the cache</term>
+<listitem><para>
+This is a debug message and indicates that a RunningQuery object found
+the specified <name, class, type> tuple in the cache.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESLIB_RUNCALOOK">
+<term>RESLIB_RUNCALOOK looking up <%1> in the cache</term>
+<listitem><para>
+This is a debug message and indicates that a RunningQuery object has made
+a call to its doLookup() method to look up the specified <name, class, type>
+tuple, the first action of which will be to examine the cache.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESLIB_RUNQUFAIL">
+<term>RESLIB_RUNQUFAIL failure callback - nameservers are unreachable</term>
+<listitem><para>
+A debug message indicating that a RunningQuery's failure callback has been
+called because all nameservers for the zone in question are unreachable.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESLIB_RUNQUSUCC">
+<term>RESLIB_RUNQUSUCC success callback - sending query to %1</term>
+<listitem><para>
+A debug message indicating that a RunningQuery's success callback has been
+called because a nameserver has been found, and that a query is being sent
+to the specified nameserver.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESLIB_TESTSERV">
+<term>RESLIB_TESTSERV setting test server to %1(%2)</term>
+<listitem><para>
+This is an internal debugging message and is only generated in unit tests.
+It indicates that all upstream queries from the resolver are being routed to
+the specified server, regardless of the address of the nameserver to which
+the query would normally be routed. As it should never be seen in normal
+operation, it is a warning message instead of a debug message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESLIB_TESTUPSTR">
+<term>RESLIB_TESTUPSTR sending upstream query for <%1> to test server at %2</term>
+<listitem><para>
+This is a debug message and should only be seen in unit tests. A query for
+the specified <name, class, type> tuple is being sent to a test nameserver
+whose address is given in the message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESLIB_TIMEOUT">
+<term>RESLIB_TIMEOUT query <%1> to %2 timed out</term>
+<listitem><para>
+A debug message indicating that the specified query has timed out and as
+there are no retries left, an error will be reported.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESLIB_TIMEOUTRTRY">
+<term>RESLIB_TIMEOUTRTRY query <%1> to %2 timed out, re-trying (retries left: %3)</term>
+<listitem><para>
+A debug message indicating that the specified query has timed out and that
+the resolver is repeating the query to the same nameserver. After this
+repeated query, there will be the indicated number of retries left.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESLIB_TRUNCATED">
+<term>RESLIB_TRUNCATED response to query for <%1> was truncated, re-querying over TCP</term>
+<listitem><para>
+A debug message, this indicates that the response to the specified query was
+truncated and that the resolver will be re-querying over TCP. There are
+various reasons why responses may be truncated, so this message is normal and
+gives no cause for concern.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESLIB_UPSTREAM">
+<term>RESLIB_UPSTREAM sending upstream query for <%1> to %2</term>
+<listitem><para>
+A debug message indicating that a query for the specified <name, class, type>
+tuple is being sent to a nameserver whose address is given in the message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_AXFRTCP">
+<term>RESOLVER_AXFRTCP AXFR request received over TCP</term>
+<listitem><para>
+A debug message, the resolver received an AXFR request over TCP. The server
+cannot process it and will return an error message to the sender with the
+RCODE set to NOTIMP.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_AXFRUDP">
+<term>RESOLVER_AXFRUDP AXFR request received over UDP</term>
+<listitem><para>
+A debug message, the resolver received an AXFR request over UDP. The server
+cannot process it (and in any case, an AXFR request should be sent over TCP)
+and will return an error message to the sender with the RCODE set to FORMERR.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_CLTMOSMALL">
+<term>RESOLVER_CLTMOSMALL client timeout of %1 is too small</term>
+<listitem><para>
+An error indicating that the configuration value specified for the client
+timeout is too small.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_CONFIGCHAN">
+<term>RESOLVER_CONFIGCHAN configuration channel created</term>
+<listitem><para>
+A debug message, output when the resolver has successfully established a
+connection to the configuration channel.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_CONFIGERR">
+<term>RESOLVER_CONFIGERR error in configuration: %1</term>
+<listitem><para>
+An error was detected in a configuration update received by the resolver. This
+may be in the format of the configuration message (in which case this is a
+programming error) or it may be in the data supplied (in which case it is
+a user error). The reason for the error, given as a parameter in the message,
+will give more details.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_CONFIGLOAD">
+<term>RESOLVER_CONFIGLOAD configuration loaded</term>
+<listitem><para>
+A debug message, output when the resolver configuration has been successfully
+loaded.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_CONFIGUPD">
+<term>RESOLVER_CONFIGUPD configuration updated: %1</term>
+<listitem><para>
+A debug message, the configuration has been updated with the specified
+information.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_CREATED">
+<term>RESOLVER_CREATED main resolver object created</term>
+<listitem><para>
+A debug message, output when the Resolver() object has been created.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_DNSMSGRCVD">
+<term>RESOLVER_DNSMSGRCVD DNS message received: %1</term>
+<listitem><para>
+A debug message, this always precedes some other logging message and is the
+formatted contents of the DNS packet that the other message refers to.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_DNSMSGSENT">
+<term>RESOLVER_DNSMSGSENT DNS message of %1 bytes sent: %2</term>
+<listitem><para>
+A debug message, this contains details of the response sent back to the querying
+system.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_FAILED">
+<term>RESOLVER_FAILED resolver failed, reason: %1</term>
+<listitem><para>
+This is an error message output when an unhandled exception is caught by the
+resolver. All it can do is to shut down.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_FWDADDR">
+<term>RESOLVER_FWDADDR setting forward address %1(%2)</term>
+<listitem><para>
+This message may appear multiple times during startup, and it lists the
+forward addresses used by the resolver when running in forwarding mode.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_FWDQUERY">
+<term>RESOLVER_FWDQUERY processing forward query</term>
+<listitem><para>
+The received query has passed all checks and is being forwarded to upstream
+servers.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_HDRERR">
+<term>RESOLVER_HDRERR message received, exception when processing header: %1</term>
+<listitem><para>
+A debug message noting that an exception occurred during the processing of
+a received packet. The packet has been dropped.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_IXFR">
+<term>RESOLVER_IXFR IXFR request received</term>
+<listitem><para>
+The resolver received an IXFR request. The server cannot process it
+and will return an error message to the sender with the RCODE set to NOTIMP.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_LKTMOSMALL">
+<term>RESOLVER_LKTMOSMALL lookup timeout of %1 is too small</term>
+<listitem><para>
+An error indicating that the configuration value specified for the lookup
+timeout is too small.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_NFYNOTAUTH">
+<term>RESOLVER_NFYNOTAUTH NOTIFY arrived but server is not authoritative</term>
+<listitem><para>
+The resolver received a NOTIFY message. As the server is not authoritative it
+cannot process it, so it returns an error message to the sender with the RCODE
+set to NOTAUTH.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_NORMQUERY">
+<term>RESOLVER_NORMQUERY processing normal query</term>
+<listitem><para>
+The received query has passed all checks and is being processed by the resolver.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_NOROOTADDR">
+<term>RESOLVER_NOROOTADDR no root addresses available</term>
+<listitem><para>
+A warning message during startup, indicating that no root addresses have been
+set. This may be because the resolver will get them from a priming query.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_NOTIN">
+<term>RESOLVER_NOTIN non-IN class request received, returning REFUSED message</term>
+<listitem><para>
+A debug message, the resolver has received a DNS packet that was not IN class.
+The resolver cannot handle such packets, so is returning a REFUSED response to
+the sender.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_NOTONEQUES">
+<term>RESOLVER_NOTONEQUES query contained %1 questions, exactly one question was expected</term>
+<listitem><para>
+A debug message, the resolver received a query that contained the number of
+entries in the question section detailed in the message. This is a malformed
+message, as a DNS query must contain only one question. The resolver will
+return a message to the sender with the RCODE set to FORMERR.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_OPCODEUNS">
+<term>RESOLVER_OPCODEUNS opcode %1 not supported by the resolver</term>
+<listitem><para>
+A debug message, the resolver received a message with an unsupported opcode
+(it can only process QUERY opcodes). It will return a message to the sender
+with the RCODE set to NOTIMP.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_PARSEERR">
+<term>RESOLVER_PARSEERR error parsing received message: %1 - returning %2</term>
+<listitem><para>
+A debug message noting that the resolver received a message and the parsing
+of the body of the message failed due to some non-protocol related reason
+(although the parsing of the header succeeded). The message parameters give
+a textual description of the problem and the RCODE returned.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_PRINTMSG">
+<term>RESOLVER_PRINTMSG print message command, arguments are: %1</term>
+<listitem><para>
+This message is logged when a "print_message" command is received over the
+command channel.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_PROTERR">
+<term>RESOLVER_PROTERR protocol error parsing received message: %1 - returning %2</term>
+<listitem><para>
+A debug message noting that the resolver received a message and the parsing
+of the body of the message failed due to some protocol error (although the
+parsing of the header succeeded). The message parameters give a textual
+description of the problem and the RCODE returned.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_QUSETUP">
+<term>RESOLVER_QUSETUP query setup</term>
+<listitem><para>
+A debug message noting that the resolver is creating a RecursiveQuery object.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_QUSHUT">
+<term>RESOLVER_QUSHUT query shutdown</term>
+<listitem><para>
+A debug message noting that the resolver is destroying a RecursiveQuery object.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_QUTMOSMALL">
+<term>RESOLVER_QUTMOSMALL query timeout of %1 is too small</term>
+<listitem><para>
+An error indicating that the configuration value specified for the query
+timeout is too small.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_RECURSIVE">
+<term>RESOLVER_RECURSIVE running in recursive mode</term>
+<listitem><para>
+This is an informational message that appears at startup noting that the
+resolver is running in recursive mode.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_RECVMSG">
+<term>RESOLVER_RECVMSG resolver has received a DNS message</term>
+<listitem><para>
+A debug message indicating that the resolver has received a message. Depending
+on the debug settings, subsequent log output will indicate the nature of the
+message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_RETRYNEG">
+<term>RESOLVER_RETRYNEG negative number of retries (%1) specified in the configuration</term>
+<listitem><para>
+An error message indicating that the resolver configuration has specified a
+negative retry count. Only zero or positive values are valid.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_ROOTADDR">
+<term>RESOLVER_ROOTADDR setting root address %1(%2)</term>
+<listitem><para>
+This message may appear multiple times during startup; it lists the root
+addresses used by the resolver.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_SERVICE">
+<term>RESOLVER_SERVICE service object created</term>
+<listitem><para>
+A debug message, output when the main service object (which handles the
+received queries) is created.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_SETPARAM">
+<term>RESOLVER_SETPARAM query timeout: %1, client timeout: %2, lookup timeout: %3, retry count: %4</term>
+<listitem><para>
+A debug message listing the parameters associated with the message. These are:
+query timeout: the timeout (in ms) used for queries originated by the resolver
+to upstream servers. Client timeout: the interval allowed to resolve a query
+from a client; after this time, the resolver sends back a SERVFAIL to the
+client whilst continuing to resolve the query. Lookup timeout: the time at
+which the resolver gives up trying to resolve a query. Retry count: the number
+of times the resolver will retry a query to an upstream server if it gets a
+timeout.
+</para><para>
+The client and lookup timeouts require a bit more explanation. The
+resolution of the client query might require a large number of queries to
+upstream nameservers. Even if none of these queries times out, the total time
+taken to perform all the queries may exceed the client timeout. When this
+happens, a SERVFAIL is returned to the client, but the resolver continues
+with the resolution process. Data received is added to the cache. However,
+there comes a time - the lookup timeout - when even the resolver gives up.
+At this point it will wait for pending upstream queries to complete or
+time out and drop the query.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_SHUTDOWN">
+<term>RESOLVER_SHUTDOWN resolver shutdown complete</term>
+<listitem><para>
+This information message is output when the resolver has shut down.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_STARTED">
+<term>RESOLVER_STARTED resolver started</term>
+<listitem><para>
+This informational message is output by the resolver when all initialization
+has been completed and it is entering its main loop.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_STARTING">
+<term>RESOLVER_STARTING starting resolver with command line '%1'</term>
+<listitem><para>
+An informational message, this is output when the resolver starts up.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_UNEXRESP">
+<term>RESOLVER_UNEXRESP received unexpected response, ignoring</term>
+<listitem><para>
+A debug message noting that the server has received a response instead of a
+query and is ignoring it.
+</para></listitem>
+</varlistentry>
+ </variablelist>
+ </para>
+ </chapter>
+</book>
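
A note on reading the message entries above: the %1, %2, ... placeholders in
each message text are filled by .arg() calls at the point where the message is
logged. A minimal C++ sketch of such a call site, modelled on the LOG_DEBUG
pattern used in the auth_srv.cc changes later in this commit; the logger name,
debug level and generated header used here are illustrative assumptions only:

    // Sketch: logging RESOLVER_PARSEERR, whose %1 is the parse failure reason
    // and %2 the RCODE that will be returned to the sender.
    #include <string>
    #include <log/macros.h>                  // LOG_DEBUG and isc::log::Logger
    #include <resolver/resolver_messages.h>  // assumed name of the generated header

    namespace {

    isc::log::Logger resolver_logger("resolver");   // illustrative logger name
    const int DBG_RESOLVER_DETAIL = 50;              // illustrative debug level

    void
    logParseError(const std::string& reason, const std::string& rcode_text) {
        LOG_DEBUG(resolver_logger, DBG_RESOLVER_DETAIL, RESOLVER_PARSEERR)
            .arg(reason).arg(rcode_text);
    }

    }   // anonymous namespace
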
diff --git a/src/bin/auth/Makefile.am b/src/bin/auth/Makefile.am
index 9c52504..64136c1 100644
--- a/src/bin/auth/Makefile.am
+++ b/src/bin/auth/Makefile.am
@@ -16,7 +16,8 @@ endif
pkglibexecdir = $(libexecdir)/@PACKAGE@
-CLEANFILES = *.gcno *.gcda auth.spec spec_config.h
+CLEANFILES = *.gcno *.gcda auth.spec spec_config.h
+CLEANFILES += auth_messages.h auth_messages.cc
man_MANS = b10-auth.8
EXTRA_DIST = $(man_MANS) b10-auth.xml
@@ -34,16 +35,25 @@ auth.spec: auth.spec.pre
spec_config.h: spec_config.h.pre
$(SED) -e "s|@@LOCALSTATEDIR@@|$(localstatedir)|" spec_config.h.pre >$@
-BUILT_SOURCES = spec_config.h
+auth_messages.h auth_messages.cc: auth_messages.mes
+ $(top_builddir)/src/lib/log/compiler/message $(top_srcdir)/src/bin/auth/auth_messages.mes
+
+BUILT_SOURCES = spec_config.h auth_messages.h auth_messages.cc
+
pkglibexec_PROGRAMS = b10-auth
b10_auth_SOURCES = query.cc query.h
b10_auth_SOURCES += auth_srv.cc auth_srv.h
+b10_auth_SOURCES += auth_log.cc auth_log.h
b10_auth_SOURCES += change_user.cc change_user.h
b10_auth_SOURCES += auth_config.cc auth_config.h
b10_auth_SOURCES += command.cc command.h
b10_auth_SOURCES += common.h common.cc
b10_auth_SOURCES += statistics.cc statistics.h
b10_auth_SOURCES += main.cc
+
+nodist_b10_auth_SOURCES = auth_messages.h auth_messages.cc
+EXTRA_DIST += auth_messages.mes
+
b10_auth_LDADD = $(top_builddir)/src/lib/datasrc/libdatasrc.la
b10_auth_LDADD += $(top_builddir)/src/lib/dns/libdns++.la
b10_auth_LDADD += $(top_builddir)/src/lib/config/libcfgclient.la
diff --git a/src/bin/auth/auth_log.cc b/src/bin/auth/auth_log.cc
new file mode 100644
index 0000000..d41eaea
--- /dev/null
+++ b/src/bin/auth/auth_log.cc
@@ -0,0 +1,26 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+/// Defines the logger used by the top-level component of b10-auth.
+
+#include "auth_log.h"
+
+namespace isc {
+namespace auth {
+
+isc::log::Logger auth_logger("auth");
+
+} // namespace auth
+} // namespace isc
+
diff --git a/src/bin/auth/auth_log.h b/src/bin/auth/auth_log.h
new file mode 100644
index 0000000..5205624
--- /dev/null
+++ b/src/bin/auth/auth_log.h
@@ -0,0 +1,54 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __AUTH_LOG__H
+#define __AUTH_LOG__H
+
+#include <log/macros.h>
+#include <auth/auth_messages.h>
+
+namespace isc {
+namespace auth {
+
+/// \brief Auth Logging
+///
+/// Defines the levels used to output debug messages in the "auth" part of
+/// the b10-auth program. Higher numbers equate to more verbose (and detailed)
+/// output.
+
+// Debug messages indicating normal startup are logged at this debug level.
+const int DBG_AUTH_START = 10;
+
+// Debug level used to log setting information (such as configuration changes).
+const int DBG_AUTH_OPS = 30;
+
+// Trace detailed operations, including errors raised when processing invalid
+// packets. (These are not logged at severities of WARN or higher for fear
+// that a set of deliberately invalid packets sent to the authoritative server
+// could overwhelm the logging.)
+const int DBG_AUTH_DETAIL = 50;
+
+// This level is used to log the contents of packets received and sent.
+const int DBG_AUTH_MESSAGES = 70;
+
+/// Define the logger for the "auth" module part of b10-auth. We could define
+/// a logger in each file, but we would want to define a common name to avoid
+/// spelling mistakes, so it is just one small step from there to define a
+/// module-common logger.
+extern isc::log::Logger auth_logger;
+
+} // namespace auth
+} // namespace isc
+
+#endif // __AUTH_LOG__H
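
The declarations above are consumed together with the generated
auth_messages.h: a call site combines the module logger, a DBG_AUTH_* level
and a message ID, with one .arg() per placeholder. The same pattern appears
throughout the auth_srv.cc changes below; the wrapper function here is an
illustrative sketch only:

    // Sketch: log either AUTH_STATS_TIMER_DISABLED (no placeholders) or
    // AUTH_STATS_TIMER_SET (%1 = interval in seconds), as b10-auth does when
    // the statistics timer is reconfigured.
    #include <stdint.h>
    #include <auth/auth_log.h>

    namespace isc {
    namespace auth {

    void
    reportStatisticsTimer(uint32_t interval) {
        if (interval == 0) {
            LOG_DEBUG(auth_logger, DBG_AUTH_OPS, AUTH_STATS_TIMER_DISABLED);
        } else {
            LOG_DEBUG(auth_logger, DBG_AUTH_OPS, AUTH_STATS_TIMER_SET)
                .arg(interval);
        }
    }

    }   // namespace auth
    }   // namespace isc
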
diff --git a/src/bin/auth/auth_messages.mes b/src/bin/auth/auth_messages.mes
new file mode 100644
index 0000000..8553d17
--- /dev/null
+++ b/src/bin/auth/auth_messages.mes
@@ -0,0 +1,260 @@
+# Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+$NAMESPACE isc::auth
+
+% AUTH_AXFR_ERROR error handling AXFR request: %1
+This is a debug message produced by the authoritative server when it
+has encountered an error processing an AXFR request. The message gives
+the reason for the error, and the server will return a SERVFAIL code to
+the sender.
+
+% AUTH_AXFR_UDP AXFR query received over UDP
+This is a debug message output when the authoritative server has received
+an AXFR query over UDP. Use of UDP for AXFRs is not permitted by the
+protocol, so the server will return a FORMERR error to the sender.
+
+% AUTH_COMMAND_FAILED execution of command channel instruction '%1' failed: %2
+Execution of the specified command by the authoritative server failed. The
+message contains the reason for the failure.
+
+% AUTH_CONFIG_CHANNEL_CREATED configuration session channel created
+This is a debug message indicating that the authoritative server has created
+the channel to the configuration manager. It is issued during server
+startup as an indication that the initialization is proceeding normally.
+
+% AUTH_CONFIG_CHANNEL_ESTABLISHED configuration session channel established
+This is a debug message indicating that the authoritative server
+has established communication with the configuration manager over the
+previously-created channel. It is issued during server startup as an
+indication that the initialization is proceeding normally.
+
+% AUTH_CONFIG_CHANNEL_STARTED configuration session channel started
+This is a debug message, issued when the authoritative server has
+posted a request to be notified when new configuration information is
+available. It is issued during server startup as an indication that
+the initialization is proceeding normally.
+
+% AUTH_CONFIG_LOAD_FAIL load of configuration failed: %1
+An attempt to configure the server with information from the configuration
+database during the startup sequence has failed. (The reason for
+the failure is given in the message.) The server will continue its
+initialization although it may not be configured in the desired way.
+
+% AUTH_CONFIG_UPDATE_FAIL update of configuration failed: %1
+An attempt to update the configuration of the server with information
+from the configuration database has failed; the reason is given in
+the message.
+
+% AUTH_DATA_SOURCE data source database file: %1
+This is a debug message produced by the authoritative server when it accesses a
+database data source, listing the file that is being accessed.
+
+% AUTH_DNS_SERVICES_CREATED DNS services created
+This is a debug message indicating that the component that will handle
+incoming queries for the authoritative server (DNSServices) has been
+successfully created. It is issued during server startup as an indication
+that the initialization is proceeding normally.
+
+% AUTH_HEADER_PARSE_FAIL unable to parse header in received DNS packet: %1
+This is a debug message, generated by the authoritative server when an
+attempt to parse the header of a received DNS packet has failed. (The
+reason for the failure is given in the message.) The server will drop the
+packet.
+
+% AUTH_LOAD_TSIG loading TSIG keys
+This is a debug message indicating that the authoritative server
+has requested the keyring holding TSIG keys from the configuration
+database. It is issued during server startup as an indication that the
+initialization is proceeding normally.
+
+% AUTH_LOAD_ZONE loaded zone %1/%2
+This debug message is issued during the processing of the 'loadzone' command
+when the authoritative server has successfully loaded the named zone of the
+named class.
+
+% AUTH_MEM_DATASRC_DISABLED memory data source is disabled for class %1
+This is a debug message reporting that the authoritative server has
+discovered that the memory data source is disabled for the given class.
+
+% AUTH_MEM_DATASRC_ENABLED memory data source is enabled for class %1
+This is a debug message reporting that the authoritative server has
+discovered that the memory data source is enabled for the given class.
+
+% AUTH_NO_STATS_SESSION session interface for statistics is not available
+The authoritative server had no session with the statistics module at the
+time it attempted to send it data: the attempt has been abandoned. This
+could be an error in configuration.
+
+% AUTH_NO_XFRIN received NOTIFY but XFRIN session is not running
+This is a debug message produced by the authoritative server when it receives
+a NOTIFY packet but the XFRIN process is not running. The packet will be
+dropped and nothing returned to the sender.
+
+% AUTH_NOTIFY_RRTYPE invalid question RR type (%1) in incoming NOTIFY
+This debug message is logged by the authoritative server when it receives
+a NOTIFY packet with an RR type other than SOA in the
+question section. (The RR type received is included in the message.) The
+server will return a FORMERR error to the sender.
+
+% AUTH_NOTIFY_QUESTIONS invalid number of questions (%1) in incoming NOTIFY
+This debug message is logged by the authoritative server when it receives
+a NOTIFY packet that contains zero or more than one question. (A valid
+NOTIFY packet contains one question.) The server will return a FORMERR
+error to the sender.
+
+% AUTH_PACKET_PARSE_ERROR unable to parse received DNS packet: %1
+This is a debug message, generated by the authoritative server when an
+attempt to parse a received DNS packet has failed due to something other
+than a protocol error. The reason for the failure is given in the message;
+the server will return a SERVFAIL error code to the sender.
+
+% AUTH_PACKET_PROTOCOL_ERROR DNS packet protocol error: %1. Returning %2
+This is a debug message, generated by the authoritative server when an
+attempt to parse a received DNS packet has failed due to a protocol error.
+The reason for the failure is given in the message, as is the error code
+that will be returned to the sender.
+
+% AUTH_PACKET_RECEIVED message received:\n%1
+This is a debug message output by the authoritative server when it
+receives a valid DNS packet.
+
+Note: This message includes the packet received, rendered in the form of
+multiple lines of text. For this reason, it is suggested that this log message
+not be routed to the syslog file, where the multiple lines could confuse
+programs that expect a format of one message per line.
+
+% AUTH_PROCESS_FAIL message processing failure: %1
+This message is generated by the authoritative server when it has
+encountered an internal error whilst processing a received packet:
+the cause of the error is included in the message.
+
+The server will return a SERVFAIL error code to the sender of the packet.
+However, this message indicates a potential error in the server.
+Please open a bug ticket for this issue.
+
+% AUTH_RECEIVED_COMMAND command '%1' received
+This is a debug message issued when the authoritative server has received
+a command on the command channel.
+
+% AUTH_RECEIVED_SENDSTATS command 'sendstats' received
+This is a debug message issued when the authoritative server has received
+a command from the statistics module to send it data. The 'sendstats'
+command is handled differently to other commands, which is why the debug
+message associated with it has its own code.
+
+% AUTH_RESPONSE_RECEIVED received response message, ignoring
+This is a debug message; it is output if the authoritative server
+receives a DNS packet with the QR bit set, i.e. a DNS response. The
+server ignores the packet as it only responds to question packets.
+
+% AUTH_SEND_ERROR_RESPONSE sending an error response (%1 bytes):\n%2
+This is a debug message recording that the authoritative server is sending
+an error response to the originator of the query. A previous message will
+have recorded details of the failure.
+
+Note: This message includes the packet sent, rendered in the form of
+multiple lines of text. For this reason, it is suggested that this log message
+not be routed to the syslog file, where the multiple lines could confuse
+programs that expect a format of one message per line.
+
+% AUTH_SEND_NORMAL_RESPONSE sending a normal response (%1 bytes):\n%2
+This is a debug message recording that the authoritative server is sending
+a response to the originator of a query.
+
+Note: This message includes the packet sent, rendered in the form of
+multiple lines of text. For this reason, it is suggested that this log message
+not be routed to the syslog file, where the multiple lines could confuse
+programs that expect a format of one message per line.
+
+% AUTH_SERVER_CREATED server created
+An informational message indicating that the authoritative server process has
+been created and is initializing. The AUTH_SERVER_STARTED message will be
+output when initialization has successfully completed and the server starts
+accepting queries.
+
+% AUTH_SERVER_FAILED server failed: %1
+The authoritative server has encountered a fatal error and is terminating. The
+reason for the failure is included in the message.
+
+% AUTH_SERVER_STARTED server started
+Initialization of the authoritative server has completed successfully
+and it is entering the main loop, waiting for queries to arrive.
+
+% AUTH_SQLITE3 nothing to do for loading sqlite3
+This is a debug message indicating that the authoritative server has
+found that the data source it is loading is an SQLite3 data source,
+so no further validation is needed.
+
+% AUTH_STATS_CHANNEL_CREATED STATS session channel created
+This is a debug message indicating that the authoritative server has
+created a channel to the statistics process. It is issued during server
+startup as an indication that the initialization is proceeding normally.
+
+% AUTH_STATS_CHANNEL_ESTABLISHED STATS session channel established
+This is a debug message indicating that the authoritative server
+has established communication over the previously created statistics
+channel. It is issued during server startup as an indication that the
+initialization is proceeding normally.
+
+% AUTH_STATS_COMMS communication error in sending statistics data: %1
+An error was encountered when the authoritative server tried to send data
+to the statistics daemon. The message includes additional information
+describing the reason for the failure.
+
+% AUTH_STATS_TIMEOUT timeout while sending statistics data: %1
+The authoritative server sent data to the statistics daemon but received
+no acknowledgement within the specified time. The message includes
+additional information describing the reason for the failure.
+
+% AUTH_STATS_TIMER_DISABLED statistics timer has been disabled
+This is a debug message indicating that the statistics timer has been
+disabled in the authoritative server and no statistics information is
+being produced.
+
+% AUTH_STATS_TIMER_SET statistics timer set to %1 second(s)
+This is a debug message indicating that the statistics timer has been
+enabled and that the authoritative server will produce statistics data
+at the specified interval.
+
+% AUTH_UNSUPPORTED_OPCODE unsupported opcode: %1
+This is a debug message, produced when a received DNS packet being
+processed by the authoritative server has been found to contain an
+unsupported opcode. (The opcode is included in the message.) The server
+will return an error code of NOTIMP to the sender.
+
+% AUTH_XFRIN_CHANNEL_CREATED XFRIN session channel created
+This is a debug message indicating that the authoritative server has
+created a channel to the XFRIN (Transfer-in) process. It is issued
+during server startup as an indication that the initialization is
+proceeding normally.
+
+% AUTH_XFRIN_CHANNEL_ESTABLISHED XFRIN session channel established
+This is a debug message indicating that the authoritative server has
+established communication over the previously-created channel to the
+XFRIN (Transfer-in) process. It is issued during server startup as an
+indication that the initialization is proceeding normally.
+
+% AUTH_ZONEMGR_COMMS error communicating with zone manager: %1
+This is a debug message output during the processing of a NOTIFY request.
+An error (listed in the message) has been encountered whilst communicating
+with the zone manager. The NOTIFY request will not be honored.
+
+% AUTH_ZONEMGR_ERROR received error response from zone manager: %1
+This is a debug message output during the processing of a NOTIFY
+request. The zone manager component has been informed of the request,
+but has returned an error response (which is included in the message). The
+NOTIFY request will not be honored.
+
+
diff --git a/src/bin/auth/auth_srv.cc b/src/bin/auth/auth_srv.cc
index 9e01155..f29fd05 100644
--- a/src/bin/auth/auth_srv.cc
+++ b/src/bin/auth/auth_srv.cc
@@ -59,6 +59,7 @@
#include <auth/auth_srv.h>
#include <auth/query.h>
#include <auth/statistics.h>
+#include <auth/auth_log.h>
using namespace std;
@@ -104,7 +105,6 @@ public:
/// These members are public because AuthSrv accesses them directly.
ModuleCCSession* config_session_;
- bool verbose_mode_;
AbstractSession* xfrin_session_;
/// In-memory data source. Currently class IN only for simplicity.
@@ -143,11 +143,11 @@ private:
AuthSrvImpl::AuthSrvImpl(const bool use_cache,
AbstractXfroutClient& xfrout_client) :
- config_session_(NULL), verbose_mode_(false),
+ config_session_(NULL),
xfrin_session_(NULL),
memory_datasrc_class_(RRClass::IN()),
statistics_timer_(io_service_),
- counters_(verbose_mode_),
+ counters_(),
keyring_(NULL),
xfrout_connected_(false),
xfrout_client_(xfrout_client)
@@ -251,7 +251,7 @@ public:
void
makeErrorMessage(MessagePtr message, OutputBufferPtr buffer,
- const Rcode& rcode, const bool verbose_mode,
+ const Rcode& rcode,
std::auto_ptr<TSIGContext> tsig_context =
std::auto_ptr<TSIGContext>())
{
@@ -289,22 +289,9 @@ makeErrorMessage(MessagePtr message, OutputBufferPtr buffer,
} else {
message->toWire(renderer);
}
-
- if (verbose_mode) {
- cerr << "[b10-auth] sending an error response (" <<
- renderer.getLength() << " bytes):\n" << message->toText() << endl;
- }
-}
-}
-
-void
-AuthSrv::setVerbose(const bool on) {
- impl_->verbose_mode_ = on;
+ LOG_DEBUG(auth_logger, DBG_AUTH_MESSAGES, AUTH_SEND_ERROR_RESPONSE)
+ .arg(message->toText());
}
-
-bool
-AuthSrv::getVerbose() const {
- return (impl_->verbose_mode_);
}
IOService&
@@ -362,15 +349,12 @@ AuthSrv::setMemoryDataSrc(const isc::dns::RRClass& rrclass,
isc_throw(InvalidParameter,
"Memory data source is not supported for RR class "
<< rrclass);
- }
- if (impl_->verbose_mode_) {
- if (!impl_->memory_datasrc_ && memory_datasrc) {
- cerr << "[b10-auth] Memory data source is enabled for class "
- << rrclass << endl;
- } else if (impl_->memory_datasrc_ && !memory_datasrc) {
- cerr << "[b10-auth] Memory data source is disabled for class "
- << rrclass << endl;
- }
+ } else if (!impl_->memory_datasrc_ && memory_datasrc) {
+ LOG_DEBUG(auth_logger, DBG_AUTH_OPS, AUTH_MEM_DATASRC_ENABLED)
+ .arg(rrclass);
+ } else if (impl_->memory_datasrc_ && !memory_datasrc) {
+ LOG_DEBUG(auth_logger, DBG_AUTH_OPS, AUTH_MEM_DATASRC_DISABLED)
+ .arg(rrclass);
}
impl_->memory_datasrc_ = memory_datasrc;
}
@@ -392,18 +376,13 @@ AuthSrv::setStatisticsTimerInterval(uint32_t interval) {
}
if (interval == 0) {
impl_->statistics_timer_.cancel();
+ LOG_DEBUG(auth_logger, DBG_AUTH_OPS, AUTH_STATS_TIMER_DISABLED);
} else {
impl_->statistics_timer_.setup(boost::bind(&AuthSrv::submitStatistics,
this),
interval * 1000);
- }
- if (impl_->verbose_mode_) {
- if (interval == 0) {
- cerr << "[b10-auth] Disabled statistics timer" << endl;
- } else {
- cerr << "[b10-auth] Set statistics timer to " << interval
- << " seconds" << endl;
- }
+ LOG_DEBUG(auth_logger, DBG_AUTH_OPS, AUTH_STATS_TIMER_SET)
+ .arg(interval);
}
}
@@ -420,17 +399,13 @@ AuthSrv::processMessage(const IOMessage& io_message, MessagePtr message,
// Ignore all responses.
if (message->getHeaderFlag(Message::HEADERFLAG_QR)) {
- if (impl_->verbose_mode_) {
- cerr << "[b10-auth] received unexpected response, ignoring"
- << endl;
- }
+ LOG_DEBUG(auth_logger, DBG_AUTH_DETAIL, AUTH_RESPONSE_RECEIVED);
server->resume(false);
return;
}
} catch (const Exception& ex) {
- if (impl_->verbose_mode_) {
- cerr << "[b10-auth] DNS packet exception: " << ex.what() << endl;
- }
+ LOG_DEBUG(auth_logger, DBG_AUTH_DETAIL, AUTH_HEADER_PARSE_FAIL)
+ .arg(ex.what());
server->resume(false);
return;
}
@@ -439,27 +414,21 @@ AuthSrv::processMessage(const IOMessage& io_message, MessagePtr message,
// Parse the message.
message->fromWire(request_buffer);
} catch (const DNSProtocolError& error) {
- if (impl_->verbose_mode_) {
- cerr << "[b10-auth] returning " << error.getRcode().toText()
- << ": " << error.what() << endl;
- }
- makeErrorMessage(message, buffer, error.getRcode(),
- impl_->verbose_mode_);
+ LOG_DEBUG(auth_logger, DBG_AUTH_DETAIL, AUTH_PACKET_PROTOCOL_ERROR)
+ .arg(error.getRcode().toText()).arg(error.what());
+ makeErrorMessage(message, buffer, error.getRcode());
server->resume(true);
return;
} catch (const Exception& ex) {
- if (impl_->verbose_mode_) {
- cerr << "[b10-auth] returning SERVFAIL: " << ex.what() << endl;
- }
- makeErrorMessage(message, buffer, Rcode::SERVFAIL(),
- impl_->verbose_mode_);
+ LOG_DEBUG(auth_logger, DBG_AUTH_DETAIL, AUTH_PACKET_PARSE_ERROR)
+ .arg(ex.what());
+ makeErrorMessage(message, buffer, Rcode::SERVFAIL());
server->resume(true);
return;
} // other exceptions will be handled at a higher layer.
- if (impl_->verbose_mode_) {
- cerr << "[b10-auth] received a message:\n" << message->toText() << endl;
- }
+ LOG_DEBUG(auth_logger, DBG_AUTH_MESSAGES, AUTH_PACKET_RECEIVED)
+ .arg(message->toText());
// Perform further protocol-level validation.
// TSIG first
@@ -481,20 +450,16 @@ AuthSrv::processMessage(const IOMessage& io_message, MessagePtr message,
bool sendAnswer = true;
if (tsig_error != TSIGError::NOERROR()) {
- makeErrorMessage(message, buffer, tsig_error.toRcode(),
- impl_->verbose_mode_, tsig_context);
+ makeErrorMessage(message, buffer, tsig_error.toRcode(), tsig_context);
} else if (message->getOpcode() == Opcode::NOTIFY()) {
sendAnswer = impl_->processNotify(io_message, message, buffer,
tsig_context);
} else if (message->getOpcode() != Opcode::QUERY()) {
- if (impl_->verbose_mode_) {
- cerr << "[b10-auth] unsupported opcode" << endl;
- }
- makeErrorMessage(message, buffer, Rcode::NOTIMP(),
- impl_->verbose_mode_, tsig_context);
+ LOG_DEBUG(auth_logger, DBG_AUTH_DETAIL, AUTH_UNSUPPORTED_OPCODE)
+ .arg(message->getOpcode().toText());
+ makeErrorMessage(message, buffer, Rcode::NOTIMP(), tsig_context);
} else if (message->getRRCount(Message::SECTION_QUESTION) != 1) {
- makeErrorMessage(message, buffer, Rcode::FORMERR(),
- impl_->verbose_mode_, tsig_context);
+ makeErrorMessage(message, buffer, Rcode::FORMERR(), tsig_context);
} else {
ConstQuestionPtr question = *message->beginQuestion();
const RRType &qtype = question->getType();
@@ -502,8 +467,7 @@ AuthSrv::processMessage(const IOMessage& io_message, MessagePtr message,
sendAnswer = impl_->processAxfrQuery(io_message, message, buffer,
tsig_context);
} else if (qtype == RRType::IXFR()) {
- makeErrorMessage(message, buffer, Rcode::NOTIMP(),
- impl_->verbose_mode_, tsig_context);
+ makeErrorMessage(message, buffer, Rcode::NOTIMP(), tsig_context);
} else {
sendAnswer = impl_->processNormalQuery(io_message, message, buffer,
tsig_context);
@@ -550,11 +514,8 @@ AuthSrvImpl::processNormalQuery(const IOMessage& io_message, MessagePtr message,
data_sources_.doQuery(query);
}
} catch (const Exception& ex) {
- if (verbose_mode_) {
- cerr << "[b10-auth] Internal error, returning SERVFAIL: " <<
- ex.what() << endl;
- }
- makeErrorMessage(message, buffer, Rcode::SERVFAIL(), verbose_mode_);
+ LOG_ERROR(auth_logger, AUTH_PROCESS_FAIL).arg(ex.what());
+ makeErrorMessage(message, buffer, Rcode::SERVFAIL());
return (true);
}
@@ -567,12 +528,8 @@ AuthSrvImpl::processNormalQuery(const IOMessage& io_message, MessagePtr message,
} else {
message->toWire(renderer);
}
-
- if (verbose_mode_) {
- cerr << "[b10-auth] sending a response ("
- << renderer.getLength()
- << " bytes):\n" << message->toText() << endl;
- }
+ LOG_DEBUG(auth_logger, DBG_AUTH_MESSAGES, AUTH_SEND_NORMAL_RESPONSE)
+ .arg(renderer.getLength()).arg(message->toText());
return (true);
}
@@ -586,11 +543,8 @@ AuthSrvImpl::processAxfrQuery(const IOMessage& io_message, MessagePtr message,
incCounter(io_message.getSocket().getProtocol());
if (io_message.getSocket().getProtocol() == IPPROTO_UDP) {
- if (verbose_mode_) {
- cerr << "[b10-auth] AXFR query over UDP isn't allowed" << endl;
- }
- makeErrorMessage(message, buffer, Rcode::FORMERR(), verbose_mode_,
- tsig_context);
+ LOG_DEBUG(auth_logger, DBG_AUTH_DETAIL, AUTH_AXFR_UDP);
+ makeErrorMessage(message, buffer, Rcode::FORMERR(), tsig_context);
return (true);
}
@@ -613,12 +567,9 @@ AuthSrvImpl::processAxfrQuery(const IOMessage& io_message, MessagePtr message,
xfrout_connected_ = false;
}
- if (verbose_mode_) {
- cerr << "[b10-auth] Error in handling XFR request: " << err.what()
- << endl;
- }
- makeErrorMessage(message, buffer, Rcode::SERVFAIL(), verbose_mode_,
- tsig_context);
+ LOG_DEBUG(auth_logger, DBG_AUTH_DETAIL, AUTH_AXFR_ERROR)
+ .arg(err.what());
+ makeErrorMessage(message, buffer, Rcode::SERVFAIL(), tsig_context);
return (true);
}
@@ -633,22 +584,16 @@ AuthSrvImpl::processNotify(const IOMessage& io_message, MessagePtr message,
// The incoming notify must contain exactly one question for SOA of the
// zone name.
if (message->getRRCount(Message::SECTION_QUESTION) != 1) {
- if (verbose_mode_) {
- cerr << "[b10-auth] invalid number of questions in notify: "
- << message->getRRCount(Message::SECTION_QUESTION) << endl;
- }
- makeErrorMessage(message, buffer, Rcode::FORMERR(), verbose_mode_,
- tsig_context);
+ LOG_DEBUG(auth_logger, DBG_AUTH_DETAIL, AUTH_NOTIFY_QUESTIONS)
+ .arg(message->getRRCount(Message::SECTION_QUESTION));
+ makeErrorMessage(message, buffer, Rcode::FORMERR(), tsig_context);
return (true);
}
ConstQuestionPtr question = *message->beginQuestion();
if (question->getType() != RRType::SOA()) {
- if (verbose_mode_) {
- cerr << "[b10-auth] invalid question RR type in notify: "
- << question->getType() << endl;
- }
- makeErrorMessage(message, buffer, Rcode::FORMERR(), verbose_mode_,
- tsig_context);
+ LOG_DEBUG(auth_logger, DBG_AUTH_DETAIL, AUTH_NOTIFY_RRTYPE)
+ .arg(question->getType().toText());
+ makeErrorMessage(message, buffer, Rcode::FORMERR(), tsig_context);
return (true);
}
@@ -664,10 +609,7 @@ AuthSrvImpl::processNotify(const IOMessage& io_message, MessagePtr message,
// silent about such cases, but there doesn't seem to be anything we can
// improve at the primary server side by sending an error anyway.
if (xfrin_session_ == NULL) {
- if (verbose_mode_) {
- cerr << "[b10-auth] "
- "session interface for xfrin is not available" << endl;
- }
+ LOG_DEBUG(auth_logger, DBG_AUTH_DETAIL, AUTH_NO_XFRIN);
return (false);
}
@@ -693,16 +635,12 @@ AuthSrvImpl::processNotify(const IOMessage& io_message, MessagePtr message,
int rcode;
parsed_answer = parseAnswer(rcode, answer);
if (rcode != 0) {
- if (verbose_mode_) {
- cerr << "[b10-auth] failed to notify Zonemgr: "
- << parsed_answer->str() << endl;
- }
+ LOG_ERROR(auth_logger, AUTH_ZONEMGR_ERROR)
+ .arg(parsed_answer->str());
return (false);
}
} catch (const Exception& ex) {
- if (verbose_mode_) {
- cerr << "[b10-auth] failed to notify Zonemgr: " << ex.what() << endl;
- }
+ LOG_ERROR(auth_logger, AUTH_ZONEMGR_COMMS).arg(ex.what());
return (false);
}
@@ -762,10 +700,7 @@ AuthSrvImpl::setDbFile(ConstElementPtr config) {
} else {
return (answer);
}
-
- if (verbose_mode_) {
- cerr << "[b10-auth] Data source database file: " << db_file_ << endl;
- }
+ LOG_DEBUG(auth_logger, DBG_AUTH_OPS, AUTH_DATA_SOURCE).arg(db_file_);
// create SQL data source
// Note: the following step is tricky to be exception-safe and to ensure
@@ -795,9 +730,7 @@ AuthSrv::updateConfig(ConstElementPtr new_config) {
}
return (impl_->setDbFile(new_config));
} catch (const isc::Exception& error) {
- if (impl_->verbose_mode_) {
- cerr << "[b10-auth] error: " << error.what() << endl;
- }
+ LOG_ERROR(auth_logger, AUTH_CONFIG_UPDATE_FAIL).arg(error.what());
return (isc::config::createAnswer(1, error.what()));
}
}
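
For reference, the logging idiom adopted above reduces to the sketch below. It is illustrative only: auth_logger, the DBG_AUTH_* debug levels and the AUTH_* message IDs are the names referenced in the diff (declared in auth_log.h and the generated auth_messages.h); the wrapper function itself is hypothetical.

    // Hypothetical helper showing the LOG_* idiom used throughout this commit.
    #include <string>

    #include <auth/auth_log.h>   // auth_logger, DBG_AUTH_* levels, message IDs

    using namespace isc::auth;

    void
    logZoneLoaded(const std::string& origin, const std::string& rrclass) {
        // Emitted only when the current debug level is at least DBG_AUTH_OPS;
        // the two arg() calls fill the message's %1/%2 placeholders.
        LOG_DEBUG(auth_logger, DBG_AUTH_OPS, AUTH_LOAD_ZONE)
            .arg(origin).arg(rrclass);
    }
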
diff --git a/src/bin/auth/auth_srv.h b/src/bin/auth/auth_srv.h
index 19c97b5..7eede97 100644
--- a/src/bin/auth/auth_srv.h
+++ b/src/bin/auth/auth_srv.h
@@ -124,27 +124,6 @@ public:
isc::util::OutputBufferPtr buffer,
isc::asiodns::DNSServer* server);
- /// \brief Set verbose flag
- ///
- /// \param on The new value of the verbose flag
-
- /// \brief Enable or disable verbose logging.
- ///
- /// This method never throws an exception.
- ///
- /// \param on \c true to enable verbose logging; \c false to disable
- /// verbose logging.
- void setVerbose(const bool on);
-
- /// \brief Returns the logging verbosity of the \c AuthSrv object.
- ///
- /// This method never throws an exception.
- ///
- /// \return \c true if verbose logging is enabled; otherwise \c false.
-
- /// \brief Get the current value of the verbose flag
- bool getVerbose() const;
-
/// \brief Updates the data source for the \c AuthSrv object.
///
/// This method installs or replaces the data source that the \c AuthSrv
diff --git a/src/bin/auth/benchmarks/Makefile.am b/src/bin/auth/benchmarks/Makefile.am
index 77d171f..cf3fe4a 100644
--- a/src/bin/auth/benchmarks/Makefile.am
+++ b/src/bin/auth/benchmarks/Makefile.am
@@ -12,6 +12,9 @@ query_bench_SOURCES += ../query.h ../query.cc
query_bench_SOURCES += ../auth_srv.h ../auth_srv.cc
query_bench_SOURCES += ../auth_config.h ../auth_config.cc
query_bench_SOURCES += ../statistics.h ../statistics.cc
+query_bench_SOURCES += ../auth_log.h ../auth_log.cc
+
+nodist_query_bench_SOURCES = ../auth_messages.h ../auth_messages.cc
query_bench_LDADD = $(top_builddir)/src/lib/dns/libdns++.la
query_bench_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
diff --git a/src/bin/auth/command.cc b/src/bin/auth/command.cc
index eafcae8..fe3d729 100644
--- a/src/bin/auth/command.cc
+++ b/src/bin/auth/command.cc
@@ -27,16 +27,18 @@
#include <config/ccsession.h>
+#include <auth/auth_log.h>
#include <auth/auth_srv.h>
#include <auth/command.h>
-using namespace std;
-using boost::shared_ptr;
using boost::scoped_ptr;
-using namespace isc::dns;
+using boost::shared_ptr;
+using namespace isc::auth;
+using namespace isc::config;
using namespace isc::data;
using namespace isc::datasrc;
-using namespace isc::config;
+using namespace isc::dns;
+using namespace std;
namespace {
/// An exception that is thrown if an error occurs while handling a command
@@ -115,9 +117,7 @@ public:
class SendStatsCommand : public AuthCommand {
public:
virtual void exec(AuthSrv& server, isc::data::ConstElementPtr) {
- if (server.getVerbose()) {
- cerr << "[b10-auth] command 'sendstats' received" << endl;
- }
+ LOG_DEBUG(auth_logger, DBG_AUTH_OPS, AUTH_RECEIVED_SENDSTATS);
server.submitStatistics();
}
};
@@ -140,11 +140,8 @@ public:
oldzone->getOrigin()));
newzone->load(oldzone->getFileName());
oldzone->swap(*newzone);
-
- if (server.getVerbose()) {
- cerr << "[b10-auth] Loaded zone '" << newzone->getOrigin()
- << "'/" << newzone->getClass() << endl;
- }
+ LOG_DEBUG(auth_logger, DBG_AUTH_OPS, AUTH_LOAD_ZONE)
+ .arg(newzone->getOrigin()).arg(newzone->getClass());
}
private:
@@ -164,10 +161,7 @@ private:
ConstElementPtr datasrc_elem = args->get("datasrc");
if (datasrc_elem) {
if (datasrc_elem->stringValue() == "sqlite3") {
- if (server.getVerbose()) {
- cerr << "[b10-auth] Nothing to do for loading sqlite3"
- << endl;
- }
+ LOG_DEBUG(auth_logger, DBG_AUTH_OPS, AUTH_SQLITE3);
return (false);
} else if (datasrc_elem->stringValue() != "memory") {
// (note: at this point it's guaranteed that datasrc_elem
@@ -233,18 +227,13 @@ ConstElementPtr
execAuthServerCommand(AuthSrv& server, const string& command_id,
ConstElementPtr args)
{
- if (server.getVerbose()) {
- cerr << "[b10-auth] Received '" << command_id << "' command" << endl;
- }
-
+ LOG_DEBUG(auth_logger, DBG_AUTH_OPS, AUTH_RECEIVED_COMMAND).arg(command_id);
try {
scoped_ptr<AuthCommand>(createAuthCommand(command_id))->exec(server,
args);
} catch (const isc::Exception& ex) {
- if (server.getVerbose()) {
- cerr << "[b10-auth] Command '" << command_id
- << "' execution failed: " << ex.what() << endl;
- }
+ LOG_ERROR(auth_logger, AUTH_COMMAND_FAILED).arg(command_id)
+ .arg(ex.what());
return (createAnswer(1, ex.what()));
}
diff --git a/src/bin/auth/main.cc b/src/bin/auth/main.cc
index 36c616e..c8f6762 100644
--- a/src/bin/auth/main.cc
+++ b/src/bin/auth/main.cc
@@ -44,26 +44,26 @@
#include <auth/command.h>
#include <auth/change_user.h>
#include <auth/auth_srv.h>
+#include <auth/auth_log.h>
#include <asiodns/asiodns.h>
#include <asiolink/asiolink.h>
-#include <log/dummylog.h>
#include <log/logger_support.h>
#include <server_common/keyring.h>
using namespace std;
-using namespace isc::data;
+using namespace isc::asiodns;
+using namespace isc::asiolink;
+using namespace isc::auth;
using namespace isc::cc;
using namespace isc::config;
+using namespace isc::data;
using namespace isc::dns;
+using namespace isc::log;
using namespace isc::util;
using namespace isc::xfr;
-using namespace isc::asiolink;
-using namespace isc::asiodns;
namespace {
-bool verbose_mode = false;
-
/* need global var for config/command handlers.
* todo: turn this around, and put handlers in the authserver
* class itself? */
@@ -89,6 +89,7 @@ usage() {
cerr << "\t-v: verbose output" << endl;
exit(1);
}
+
} // end of anonymous namespace
int
@@ -96,6 +97,7 @@ main(int argc, char* argv[]) {
int ch;
const char* uid = NULL;
bool cache = true;
+ bool verbose = false;
while ((ch = getopt(argc, argv, ":nu:v")) != -1) {
switch (ch) {
@@ -106,8 +108,7 @@ main(int argc, char* argv[]) {
uid = optarg;
break;
case 'v':
- verbose_mode = true;
- isc::log::denabled = true;
+ verbose = true;
break;
case '?':
default:
@@ -121,7 +122,7 @@ main(int argc, char* argv[]) {
// Initialize logging. If verbose, we'll use maximum verbosity.
isc::log::initLogger("b10-auth",
- (verbose_mode ? isc::log::DEBUG : isc::log::INFO),
+ (verbose ? isc::log::DEBUG : isc::log::INFO),
isc::log::MAX_DEBUG_LEVEL, NULL);
int ret = 0;
@@ -144,8 +145,7 @@ main(int argc, char* argv[]) {
}
auth_server = new AuthSrv(cache, xfrout_client);
- auth_server->setVerbose(verbose_mode);
- cout << "[b10-auth] Server created." << endl;
+ LOG_INFO(auth_logger, AUTH_SERVER_CREATED);
SimpleCallback* checkin = auth_server->getCheckinProvider();
IOService& io_service = auth_server->getIOService();
@@ -154,10 +154,10 @@ main(int argc, char* argv[]) {
DNSService dns_service(io_service, checkin, lookup, answer);
auth_server->setDNSService(dns_service);
- cout << "[b10-auth] DNSServices created." << endl;
+ LOG_DEBUG(auth_logger, DBG_AUTH_START, AUTH_DNS_SERVICES_CREATED);
cc_session = new Session(io_service.get_io_service());
- cout << "[b10-auth] Configuration session channel created." << endl;
+ LOG_DEBUG(auth_logger, DBG_AUTH_START, AUTH_CONFIG_CHANNEL_CREATED);
// We delay starting listening to new commands/config just before we
// go into the main loop to avoid confusion due to mixture of
@@ -167,19 +167,19 @@ main(int argc, char* argv[]) {
config_session = new ModuleCCSession(specfile, *cc_session,
my_config_handler,
my_command_handler, false);
- cout << "[b10-auth] Configuration channel established." << endl;
+ LOG_DEBUG(auth_logger, DBG_AUTH_START, AUTH_CONFIG_CHANNEL_ESTABLISHED);
xfrin_session = new Session(io_service.get_io_service());
- cout << "[b10-auth] Xfrin session channel created." << endl;
+ LOG_DEBUG(auth_logger, DBG_AUTH_START, AUTH_XFRIN_CHANNEL_CREATED);
xfrin_session->establish(NULL);
xfrin_session_established = true;
- cout << "[b10-auth] Xfrin session channel established." << endl;
+ LOG_DEBUG(auth_logger, DBG_AUTH_START, AUTH_XFRIN_CHANNEL_ESTABLISHED);
statistics_session = new Session(io_service.get_io_service());
- cout << "[b10-auth] Statistics session channel created." << endl;
+ LOG_DEBUG(auth_logger, DBG_AUTH_START, AUTH_STATS_CHANNEL_CREATED);
statistics_session->establish(NULL);
statistics_session_established = true;
- cout << "[b10-auth] Statistics session channel established." << endl;
+ LOG_DEBUG(auth_logger, DBG_AUTH_START, AUTH_STATS_CHANNEL_ESTABLISHED);
auth_server->setXfrinSession(xfrin_session);
auth_server->setStatisticsSession(statistics_session);
@@ -188,33 +188,34 @@ main(int argc, char* argv[]) {
// all initial configurations, but as a short term workaround we
// handle the traditional "database_file" setup by directly calling
// updateConfig().
- // if server load configure failed, we won't exit, give user second chance
- // to correct the configure.
+ // if loading the server configuration fails, we won't exit; give the user
+ // a second chance to correct the configuration.
auth_server->setConfigSession(config_session);
try {
configureAuthServer(*auth_server, config_session->getFullConfig());
auth_server->updateConfig(ElementPtr());
} catch (const AuthConfigError& ex) {
- cout << "[bin10-auth] Server load config failed:" << ex.what() << endl;
+ LOG_ERROR(auth_logger, AUTH_CONFIG_LOAD_FAIL).arg(ex.what());
}
if (uid != NULL) {
changeUser(uid);
}
- cout << "[b10-auth] Loading TSIG keys" << endl;
+ LOG_DEBUG(auth_logger, DBG_AUTH_START, AUTH_LOAD_TSIG);
isc::server_common::initKeyring(*config_session);
auth_server->setTSIGKeyRing(&isc::server_common::keyring);
// Now start asynchronous read.
config_session->start();
- cout << "[b10-auth] Configuration channel started." << endl;
+ LOG_DEBUG(auth_logger, DBG_AUTH_START, AUTH_CONFIG_CHANNEL_STARTED);
- cout << "[b10-auth] Server started." << endl;
+ // Successfully initialized.
+ LOG_INFO(auth_logger, AUTH_SERVER_STARTED);
io_service.run();
} catch (const std::exception& ex) {
- cerr << "[b10-auth] Server failed: " << ex.what() << endl;
+ LOG_FATAL(auth_logger, AUTH_SERVER_FAILED).arg(ex.what());
ret = 1;
}
diff --git a/src/bin/auth/statistics.cc b/src/bin/auth/statistics.cc
index 415aa14..76e5007 100644
--- a/src/bin/auth/statistics.cc
+++ b/src/bin/auth/statistics.cc
@@ -13,6 +13,7 @@
// PERFORMANCE OF THIS SOFTWARE.
#include <auth/statistics.h>
+#include <auth/auth_log.h>
#include <cc/data.h>
#include <cc/session.h>
@@ -20,6 +21,8 @@
#include <sstream>
#include <iostream>
+using namespace isc::auth;
+
// TODO: We need a namespace ("auth_server"?) to hold
// AuthSrv and AuthCounters.
@@ -29,10 +32,7 @@ private:
AuthCountersImpl(const AuthCountersImpl& source);
AuthCountersImpl& operator=(const AuthCountersImpl& source);
public:
- // References verbose_mode flag in AuthSrvImpl
- // TODO: Fix this short term workaround for logging
- // after we have logging framework
- AuthCountersImpl(const bool& verbose_mode);
+ AuthCountersImpl();
~AuthCountersImpl();
void inc(const AuthCounters::CounterType type);
bool submitStatistics() const;
@@ -42,15 +42,13 @@ public:
private:
std::vector<uint64_t> counters_;
isc::cc::AbstractSession* statistics_session_;
- const bool& verbose_mode_;
};
-AuthCountersImpl::AuthCountersImpl(const bool& verbose_mode) :
+AuthCountersImpl::AuthCountersImpl() :
// initialize counter
// size: AuthCounters::COUNTER_TYPES, initial value: 0
counters_(AuthCounters::COUNTER_TYPES, 0),
- statistics_session_(NULL),
- verbose_mode_(verbose_mode)
+ statistics_session_(NULL)
{}
AuthCountersImpl::~AuthCountersImpl()
@@ -64,11 +62,7 @@ AuthCountersImpl::inc(const AuthCounters::CounterType type) {
bool
AuthCountersImpl::submitStatistics() const {
if (statistics_session_ == NULL) {
- if (verbose_mode_) {
- std::cerr << "[b10-auth] "
- << "session interface for statistics"
- << " is not available" << std::endl;
- }
+ LOG_ERROR(auth_logger, AUTH_NO_STATS_SESSION);
return (false);
}
std::stringstream statistics_string;
@@ -95,18 +89,10 @@ AuthCountersImpl::submitStatistics() const {
// currently it just returns empty message
statistics_session_->group_recvmsg(env, answer, false, seq);
} catch (const isc::cc::SessionError& ex) {
- if (verbose_mode_) {
- std::cerr << "[b10-auth] "
- << "communication error in sending statistics data: "
- << ex.what() << std::endl;
- }
+ LOG_ERROR(auth_logger, AUTH_STATS_COMMS).arg(ex.what());
return (false);
} catch (const isc::cc::SessionTimeout& ex) {
- if (verbose_mode_) {
- std::cerr << "[b10-auth] "
- << "timeout happened while sending statistics data: "
- << ex.what() << std::endl;
- }
+ LOG_ERROR(auth_logger, AUTH_STATS_TIMEOUT).arg(ex.what());
return (false);
}
return (true);
@@ -125,8 +111,7 @@ AuthCountersImpl::getCounter(const AuthCounters::CounterType type) const {
return (counters_.at(type));
}
-AuthCounters::AuthCounters(const bool& verbose_mode) :
- impl_(new AuthCountersImpl(verbose_mode))
+AuthCounters::AuthCounters() : impl_(new AuthCountersImpl())
{}
AuthCounters::~AuthCounters() {
diff --git a/src/bin/auth/statistics.h b/src/bin/auth/statistics.h
index 9e5240e..5bf6436 100644
--- a/src/bin/auth/statistics.h
+++ b/src/bin/auth/statistics.h
@@ -61,15 +61,10 @@ public:
};
/// The constructor.
///
- /// \param verbose_mode reference to verbose_mode_ of AuthSrvImpl
- ///
/// This constructor is mostly exception free. But it may still throw
/// a standard exception if memory allocation fails inside the method.
///
- /// \todo Fix this short term workaround for logging
- /// after we have logging framework.
- ///
- AuthCounters(const bool& verbose_mode);
+ AuthCounters();
/// The destructor.
///
/// This method never throws an exception.
diff --git a/src/bin/auth/tests/Makefile.am b/src/bin/auth/tests/Makefile.am
index a4620f5..71520c2 100644
--- a/src/bin/auth/tests/Makefile.am
+++ b/src/bin/auth/tests/Makefile.am
@@ -22,6 +22,7 @@ TESTS += run_unittests
run_unittests_SOURCES = $(top_srcdir)/src/lib/dns/tests/unittest_util.h
run_unittests_SOURCES += $(top_srcdir)/src/lib/dns/tests/unittest_util.cc
run_unittests_SOURCES += ../auth_srv.h ../auth_srv.cc
+run_unittests_SOURCES += ../auth_log.h ../auth_log.cc
run_unittests_SOURCES += ../query.h ../query.cc
run_unittests_SOURCES += ../change_user.h ../change_user.cc
run_unittests_SOURCES += ../auth_config.h ../auth_config.cc
@@ -36,6 +37,9 @@ run_unittests_SOURCES += query_unittest.cc
run_unittests_SOURCES += change_user_unittest.cc
run_unittests_SOURCES += statistics_unittest.cc
run_unittests_SOURCES += run_unittests.cc
+
+nodist_run_unittests_SOURCES = ../auth_messages.h ../auth_messages.cc
+
run_unittests_CPPFLAGS = $(AM_CPPFLAGS) $(GTEST_INCLUDES)
run_unittests_LDFLAGS = $(AM_LDFLAGS) $(GTEST_LDFLAGS)
run_unittests_LDADD = $(GTEST_LDADD)
diff --git a/src/bin/auth/tests/auth_srv_unittest.cc b/src/bin/auth/tests/auth_srv_unittest.cc
index d922901..2b20d65 100644
--- a/src/bin/auth/tests/auth_srv_unittest.cc
+++ b/src/bin/auth/tests/auth_srv_unittest.cc
@@ -190,15 +190,6 @@ TEST_F(AuthSrvTest, unsupportedRequest) {
unsupportedRequest();
}
-// Simple API check
-TEST_F(AuthSrvTest, verbose) {
- EXPECT_FALSE(server.getVerbose());
- server.setVerbose(true);
- EXPECT_TRUE(server.getVerbose());
- server.setVerbose(false);
- EXPECT_FALSE(server.getVerbose());
-}
-
// Multiple questions. Should result in FORMERR.
TEST_F(AuthSrvTest, multiQuestion) {
multiQuestion();
diff --git a/src/bin/auth/tests/statistics_unittest.cc b/src/bin/auth/tests/statistics_unittest.cc
index 062b70d..9a3dded 100644
--- a/src/bin/auth/tests/statistics_unittest.cc
+++ b/src/bin/auth/tests/statistics_unittest.cc
@@ -69,13 +69,12 @@ private:
};
protected:
- AuthCountersTest() : verbose_mode_(false), counters(verbose_mode_) {
+ AuthCountersTest() : counters() {
counters.setStatisticsSession(&statistics_session_);
}
~AuthCountersTest() {
}
MockSession statistics_session_;
- bool verbose_mode_;
AuthCounters counters;
};
diff --git a/src/bin/cfgmgr/plugins/b10logging.py b/src/bin/cfgmgr/plugins/b10logging.py
index 6af3f66..e288c6d 100644
--- a/src/bin/cfgmgr/plugins/b10logging.py
+++ b/src/bin/cfgmgr/plugins/b10logging.py
@@ -48,6 +48,19 @@ def check(config):
for logger in config['loggers']:
# name should always be present
name = logger['name']
+ # report an error if name starts with * but not *.,
+ # or if * is not the first character.
+ # TODO: we might want to also warn or error if the
+ # logger name is not an existing module, but we can't
+ # really tell that from here at this point
+ star_pos = name.find('*')
+ if star_pos > 0 or\
+ name == '*.' or\
+ (star_pos == 0 and len(name) > 1 and name[1] != '.'):
+ errors.append("Bad logger name: '" + name + "': * can "
+ "only be used instead of the full "
+ "first-level name, e.g. '*' or "
+ "'*.subsystem'")
if 'severity' in logger and\
logger['severity'].lower() not in ALLOWED_SEVERITIES:
@@ -71,11 +84,11 @@ def check(config):
'output' in output_option and\
output_option['output'] not in ALLOWED_STREAMS:
errors.append("bad output for logger " + name +
- ": " + output_option['stream'] +
+ ": " + output_option['output'] +
", must be stdout or stderr")
elif destination == "file" and\
- 'output' not in output_option or\
- output_option['output'] == "":
+ ('output' not in output_option or\
+ output_option['output'] == ""):
errors.append("destination set to file but "
"output not set to any "
"filename for logger "
diff --git a/src/bin/cfgmgr/plugins/tests/Makefile.am b/src/bin/cfgmgr/plugins/tests/Makefile.am
index 725d391..07b7a85 100644
--- a/src/bin/cfgmgr/plugins/tests/Makefile.am
+++ b/src/bin/cfgmgr/plugins/tests/Makefile.am
@@ -1,5 +1,5 @@
PYCOVERAGE_RUN = @PYCOVERAGE_RUN@
-PYTESTS = tsig_keys_test.py
+PYTESTS = tsig_keys_test.py logging_test.py
EXTRA_DIST = $(PYTESTS)
diff --git a/src/bin/cfgmgr/plugins/tests/logging_test.py b/src/bin/cfgmgr/plugins/tests/logging_test.py
new file mode 100644
index 0000000..818a596
--- /dev/null
+++ b/src/bin/cfgmgr/plugins/tests/logging_test.py
@@ -0,0 +1,135 @@
+# Copyright (C) 2011 Internet Systems Consortium.
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+# Make sure we can load the module, put it into path
+import sys
+import os
+sys.path.extend(os.environ["B10_TEST_PLUGIN_DIR"].split(':'))
+
+import b10logging
+import unittest
+
+class LoggingConfCheckTest(unittest.TestCase):
+ def test_load(self):
+ """
+ Checks the entry point returns the correct values.
+ """
+ (spec, check) = b10logging.load()
+ # It returns the checking function
+ self.assertEqual(check, b10logging.check)
+ # The plugin stores its spec
+ self.assertEqual(spec, b10logging.spec)
+
+ def test_logger_conf(self):
+ self.assertEqual(None,
+ b10logging.check({'loggers':
+ [{'name': '*',
+ 'severity': 'DEBUG',
+ 'debuglevel': 50,
+ 'output_options':
+ [{'destination': 'file',
+ 'output': '/some/file'
+ }]
+ },
+ {'name': 'b10-resolver',
+ 'severity': 'WARN',
+ 'additive': True,
+ 'output_options':
+ [{'destination': 'console',
+ 'output': 'stderr',
+ 'flush': True
+ }]
+ },
+ {'name': 'b10-resolver.resolver',
+ 'severity': 'ERROR',
+ 'output_options': []
+ },
+ {'name': '*.cache',
+ 'severity': 'INFO'
+ }
+ ]}))
+ def do_bad_name_test(self, name):
+ err_str = "Bad logger name: '" + name + "': * can only be "\
+ "used instead of the full first-level name, e.g. "\
+ "'*' or '*.subsystem'"
+ self.assertEqual(err_str,
+ b10logging.check({'loggers':
+ [{'name': name,
+ 'severity': 'DEBUG'},
+ ]}))
+
+ def test_logger_bad_name(self):
+ self.do_bad_name_test("*.")
+ self.do_bad_name_test("*foo")
+ self.do_bad_name_test("*foo.lib")
+ self.do_bad_name_test("foo*")
+ self.do_bad_name_test("foo*.lib")
+
+ def test_logger_bad_severity(self):
+ self.assertEqual('bad severity value for logger *: BADVAL',
+ b10logging.check({'loggers':
+ [{'name': '*',
+ 'severity': 'BADVAL'}]}))
+
+ def test_logger_bad_destination(self):
+ self.assertEqual('bad destination for logger *: baddest',
+ b10logging.check({'loggers':
+ [{'name': '*',
+ 'severity': 'INFO',
+ 'output_options': [
+ { 'destination': 'baddest' }
+ ]}]}))
+
+ def test_logger_bad_console_output(self):
+ self.assertEqual('bad output for logger *: bad_output, must be stdout or stderr',
+ b10logging.check({'loggers':
+ [{'name': '*',
+ 'severity': 'INFO',
+ 'output_options': [
+ { 'destination': 'console',
+ 'output': 'bad_output'
+ }
+ ]}]}))
+
+ def test_logger_bad_file_output(self):
+ self.assertEqual('destination set to file but output not set to any filename for logger *',
+ b10logging.check({'loggers':
+ [{'name': '*',
+ 'severity': 'INFO',
+ 'output_options': [
+ { 'destination': 'file' }
+ ]}]}))
+
+ def test_logger_bad_syslog_output(self):
+ self.assertEqual('destination set to syslog but output not set to any facility for logger *',
+ b10logging.check({'loggers':
+ [{'name': '*',
+ 'severity': 'INFO',
+ 'output_options': [
+ { 'destination': 'syslog' }
+ ]}]}))
+
+ def test_logger_bad_type(self):
+ self.assertEqual('123 should be a string',
+ b10logging.check({'loggers':
+ [{'name': 123,
+ 'severity': 'INFO'}]}))
+ self.assertEqual('123 should be a string',
+ b10logging.check({'loggers':
+ [{'name': 'bind10',
+ 'severity': 123}]}))
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/src/bin/resolver/Makefile.am b/src/bin/resolver/Makefile.am
index e826081..bce8307 100644
--- a/src/bin/resolver/Makefile.am
+++ b/src/bin/resolver/Makefile.am
@@ -20,10 +20,10 @@ pkglibexecdir = $(libexecdir)/@PACKAGE@
CLEANFILES = *.gcno *.gcda
CLEANFILES += resolver.spec spec_config.h
-CLEANFILES += resolverdef.cc resolverdef.h
+CLEANFILES += resolver_messages.cc resolver_messages.h
man_MANS = b10-resolver.8
-EXTRA_DIST = $(man_MANS) b10-resolver.xml resolverdef.mes
+EXTRA_DIST = $(man_MANS) b10-resolver.xml resolver_messages.mes
if ENABLE_MAN
@@ -39,11 +39,11 @@ spec_config.h: spec_config.h.pre
$(SED) -e "s|@@LOCALSTATEDIR@@|$(localstatedir)|" spec_config.h.pre >$@
# Define rule to build logging source files from message file
-resolverdef.h resolverdef.cc: resolverdef.mes
- $(top_builddir)/src/lib/log/compiler/message $(top_srcdir)/src/bin/resolver/resolverdef.mes
+resolver_messages.h resolver_messages.cc: resolver_messages.mes
+ $(top_builddir)/src/lib/log/compiler/message $(top_srcdir)/src/bin/resolver/resolver_messages.mes
-BUILT_SOURCES = spec_config.h resolverdef.cc resolverdef.h
+BUILT_SOURCES = spec_config.h resolver_messages.cc resolver_messages.h
pkglibexec_PROGRAMS = b10-resolver
b10_resolver_SOURCES = resolver.cc resolver.h
@@ -53,7 +53,7 @@ b10_resolver_SOURCES += $(top_builddir)/src/bin/auth/change_user.h
b10_resolver_SOURCES += $(top_builddir)/src/bin/auth/common.h
b10_resolver_SOURCES += main.cc
-nodist_b10_resolver_SOURCES = resolverdef.cc resolverdef.h
+nodist_b10_resolver_SOURCES = resolver_messages.cc resolver_messages.h
b10_resolver_LDADD = $(top_builddir)/src/lib/dns/libdns++.la
diff --git a/src/bin/resolver/main.cc b/src/bin/resolver/main.cc
index 530f689..d9c30b9 100644
--- a/src/bin/resolver/main.cc
+++ b/src/bin/resolver/main.cc
@@ -80,7 +80,7 @@ my_command_handler(const string& command, ConstElementPtr args) {
ConstElementPtr answer = createAnswer();
if (command == "print_message") {
- LOG_INFO(resolver_logger, RESOLVER_PRINTMSG).arg(args);
+ LOG_INFO(resolver_logger, RESOLVER_PRINT_COMMAND).arg(args);
/* let's add that message to our answer as well */
answer = createAnswer(0, args);
} else if (command == "shutdown") {
@@ -203,14 +203,14 @@ main(int argc, char* argv[]) {
DNSService dns_service(io_service, checkin, lookup, answer);
resolver->setDNSService(dns_service);
- LOG_DEBUG(resolver_logger, RESOLVER_DBG_INIT, RESOLVER_SERVICE);
+ LOG_DEBUG(resolver_logger, RESOLVER_DBG_INIT, RESOLVER_SERVICE_CREATED);
cc_session = new Session(io_service.get_io_service());
config_session = new ModuleCCSession(specfile, *cc_session,
my_config_handler,
my_command_handler,
true, true);
- LOG_DEBUG(resolver_logger, RESOLVER_DBG_INIT, RESOLVER_CONFIGCHAN);
+ LOG_DEBUG(resolver_logger, RESOLVER_DBG_INIT, RESOLVER_CONFIG_CHANNEL);
// FIXME: This does not belong here, but inside Boss
if (uid != NULL) {
@@ -218,7 +218,11 @@ main(int argc, char* argv[]) {
}
resolver->setConfigSession(config_session);
- LOG_DEBUG(resolver_logger, RESOLVER_DBG_INIT, RESOLVER_CONFIGLOAD);
+ // Install all initial configurations. If loading configuration
+ // fails, it will be logged, but we start the server anyway, giving
+ // the user a second chance to correct the configuration.
+ resolver->updateConfig(config_session->getFullConfig());
+ LOG_DEBUG(resolver_logger, RESOLVER_DBG_INIT, RESOLVER_CONFIG_LOADED);
LOG_INFO(resolver_logger, RESOLVER_STARTED);
io_service.run();
diff --git a/src/bin/resolver/resolver.cc b/src/bin/resolver/resolver.cc
index 934fbdf..4a937c6 100644
--- a/src/bin/resolver/resolver.cc
+++ b/src/bin/resolver/resolver.cc
@@ -20,12 +20,18 @@
#include <vector>
#include <cassert>
+#include <boost/shared_ptr.hpp>
+#include <boost/lexical_cast.hpp>
+#include <boost/foreach.hpp>
+
+#include <exceptions/exceptions.h>
+
+#include <acl/acl.h>
+#include <acl/loader.h>
+
#include <asiodns/asiodns.h>
#include <asiolink/asiolink.h>
-#include <boost/foreach.hpp>
-#include <boost/lexical_cast.hpp>
-
#include <config/ccsession.h>
#include <exceptions/exceptions.h>
@@ -41,6 +47,8 @@
#include <dns/rrttl.h>
#include <dns/message.h>
#include <dns/messagerenderer.h>
+
+#include <server_common/client.h>
#include <server_common/portconfig.h>
#include <resolve/recursive_query.h>
@@ -49,14 +57,17 @@
#include "resolver_log.h"
using namespace std;
+using namespace boost;
using namespace isc;
using namespace isc::util;
+using namespace isc::acl;
using namespace isc::dns;
using namespace isc::data;
using namespace isc::config;
using namespace isc::asiodns;
using namespace isc::asiolink;
+using namespace isc::server_common;
using namespace isc::server_common::portconfig;
class ResolverImpl {
@@ -71,6 +82,7 @@ public:
client_timeout_(4000),
lookup_timeout_(30000),
retries_(3),
+ query_acl_(new Resolver::ClientACL(REJECT)),
rec_query_(NULL)
{}
@@ -83,7 +95,7 @@ public:
isc::cache::ResolverCache& cache)
{
assert(!rec_query_); // queryShutdown must be called first
- LOG_DEBUG(resolver_logger, RESOLVER_DBG_INIT, RESOLVER_QUSETUP);
+ LOG_DEBUG(resolver_logger, RESOLVER_DBG_INIT, RESOLVER_QUERY_SETUP);
rec_query_ = new RecursiveQuery(dnss,
nsas, cache,
upstream_,
@@ -99,7 +111,8 @@ public:
// (this is not a safety check, just to prevent logging of
// actions that are not performed
if (rec_query_) {
- LOG_DEBUG(resolver_logger, RESOLVER_DBG_INIT, RESOLVER_QUSHUT);
+ LOG_DEBUG(resolver_logger, RESOLVER_DBG_INIT,
+ RESOLVER_QUERY_SHUTDOWN);
delete rec_query_;
rec_query_ = NULL;
}
@@ -112,7 +125,7 @@ public:
if (dnss) {
if (!upstream_.empty()) {
BOOST_FOREACH(const AddressPair& address, upstream) {
- LOG_INFO(resolver_logger, RESOLVER_FWDADDR)
+ LOG_INFO(resolver_logger, RESOLVER_FORWARD_ADDRESS)
.arg(address.first).arg(address.second);
}
} else {
@@ -128,11 +141,11 @@ public:
if (dnss) {
if (!upstream_root_.empty()) {
BOOST_FOREACH(const AddressPair& address, upstream_root) {
- LOG_INFO(resolver_logger, RESOLVER_ROOTADDR)
+ LOG_INFO(resolver_logger, RESOLVER_SET_ROOT_ADDRESS)
.arg(address.first).arg(address.second);
}
} else {
- LOG_WARN(resolver_logger, RESOLVER_NOROOTADDR);
+ LOG_WARN(resolver_logger, RESOLVER_NO_ROOT_ADDRESS);
}
}
}
@@ -140,10 +153,20 @@ public:
void resolve(const isc::dns::QuestionPtr& question,
const isc::resolve::ResolverInterface::CallbackPtr& callback);
- void processNormalQuery(ConstMessagePtr query_message,
- MessagePtr answer_message,
- OutputBufferPtr buffer,
- DNSServer* server);
+ enum NormalQueryResult { RECURSION, DROPPED, ERROR };
+ NormalQueryResult processNormalQuery(const IOMessage& io_message,
+ MessagePtr query_message,
+ MessagePtr answer_message,
+ OutputBufferPtr buffer,
+ DNSServer* server);
+
+ const Resolver::ClientACL& getQueryACL() const {
+ return (*query_acl_);
+ }
+
+ void setQueryACL(shared_ptr<const Resolver::ClientACL> new_acl) {
+ query_acl_ = new_acl;
+ }
/// Currently non-configurable, but will be.
static const uint16_t DEFAULT_LOCAL_UDPSIZE = 4096;
@@ -168,6 +191,8 @@ public:
unsigned retries_;
private:
+ /// ACL on incoming queries
+ shared_ptr<const Resolver::ClientACL> query_acl_;
/// Object to handle upstream queries
RecursiveQuery* rec_query_;
@@ -302,7 +327,8 @@ public:
answer_message->toWire(renderer);
- LOG_DEBUG(resolver_logger, RESOLVER_DBG_DETAIL, RESOLVER_DNSMSGSENT)
+ LOG_DEBUG(resolver_logger, RESOLVER_DBG_DETAIL,
+ RESOLVER_DNS_MESSAGE_SENT)
.arg(renderer.getLength()).arg(*answer_message);
}
};
@@ -395,13 +421,12 @@ Resolver::processMessage(const IOMessage& io_message,
// Ignore all responses.
if (query_message->getHeaderFlag(Message::HEADERFLAG_QR)) {
- LOG_DEBUG(resolver_logger, RESOLVER_DBG_IO, RESOLVER_UNEXRESP);
+ LOG_DEBUG(resolver_logger, RESOLVER_DBG_IO, RESOLVER_UNEXPECTED_RESPONSE);
server->resume(false);
return;
}
-
} catch (const Exception& ex) {
- LOG_DEBUG(resolver_logger, RESOLVER_DBG_IO, RESOLVER_HDRERR)
+ LOG_DEBUG(resolver_logger, RESOLVER_DBG_IO, RESOLVER_HEADER_ERROR)
.arg(ex.what());
server->resume(false);
return;
@@ -411,14 +436,14 @@ Resolver::processMessage(const IOMessage& io_message,
try {
query_message->fromWire(request_buffer);
} catch (const DNSProtocolError& error) {
- LOG_DEBUG(resolver_logger, RESOLVER_DBG_IO, RESOLVER_PROTERR)
+ LOG_DEBUG(resolver_logger, RESOLVER_DBG_IO, RESOLVER_PROTOCOL_ERROR)
.arg(error.what()).arg(error.getRcode());
makeErrorMessage(query_message, answer_message,
buffer, error.getRcode());
server->resume(true);
return;
} catch (const Exception& ex) {
- LOG_DEBUG(resolver_logger, RESOLVER_DBG_IO, RESOLVER_PROTERR)
+ LOG_DEBUG(resolver_logger, RESOLVER_DBG_IO, RESOLVER_MESSAGE_ERROR)
.arg(ex.what()).arg(Rcode::SERVFAIL());
makeErrorMessage(query_message, answer_message,
buffer, Rcode::SERVFAIL());
@@ -429,78 +454,45 @@ Resolver::processMessage(const IOMessage& io_message,
// Note: there appears to be no LOG_DEBUG for a successfully-received
// message. This is not an oversight - it is handled below. In the
// meantime, output the full message for debug purposes (if requested).
- LOG_DEBUG(resolver_logger, RESOLVER_DBG_DETAIL, RESOLVER_DNSMSGRCVD)
- .arg(*query_message);
+ LOG_DEBUG(resolver_logger, RESOLVER_DBG_DETAIL,
+ RESOLVER_DNS_MESSAGE_RECEIVED).arg(*query_message);
// Perform further protocol-level validation.
- bool sendAnswer = true;
+ bool send_answer = true;
if (query_message->getOpcode() == Opcode::NOTIFY()) {
makeErrorMessage(query_message, answer_message,
buffer, Rcode::NOTAUTH());
// Notify arrived, but we are not authoritative.
- LOG_DEBUG(resolver_logger, RESOLVER_DBG_PROCESS, RESOLVER_NFYNOTAUTH);
-
+ LOG_DEBUG(resolver_logger, RESOLVER_DBG_PROCESS,
+ RESOLVER_NOTIFY_RECEIVED);
} else if (query_message->getOpcode() != Opcode::QUERY()) {
-
// Unsupported opcode.
- LOG_DEBUG(resolver_logger, RESOLVER_DBG_PROCESS, RESOLVER_OPCODEUNS)
- .arg(query_message->getOpcode());
+ LOG_DEBUG(resolver_logger, RESOLVER_DBG_PROCESS,
+ RESOLVER_UNSUPPORTED_OPCODE).arg(query_message->getOpcode());
makeErrorMessage(query_message, answer_message,
buffer, Rcode::NOTIMP());
-
} else if (query_message->getRRCount(Message::SECTION_QUESTION) != 1) {
-
// Not one question
- LOG_DEBUG(resolver_logger, RESOLVER_DBG_PROCESS, RESOLVER_NOTONEQUES)
+ LOG_DEBUG(resolver_logger, RESOLVER_DBG_PROCESS,
+ RESOLVER_NOT_ONE_QUESTION)
.arg(query_message->getRRCount(Message::SECTION_QUESTION));
- makeErrorMessage(query_message, answer_message,
- buffer, Rcode::FORMERR());
+ makeErrorMessage(query_message, answer_message, buffer,
+ Rcode::FORMERR());
} else {
- ConstQuestionPtr question = *query_message->beginQuestion();
- const RRType &qtype = question->getType();
- if (qtype == RRType::AXFR()) {
- if (io_message.getSocket().getProtocol() == IPPROTO_UDP) {
-
- // Can't process AXFR request receoved over UDP
- LOG_DEBUG(resolver_logger, RESOLVER_DBG_PROCESS,
- RESOLVER_AXFRUDP);
- makeErrorMessage(query_message, answer_message,
- buffer, Rcode::FORMERR());
- } else {
-
- // ... or over TCP for that matter
- LOG_DEBUG(resolver_logger, RESOLVER_DBG_PROCESS,
- RESOLVER_AXFRTCP);
- makeErrorMessage(query_message, answer_message,
- buffer, Rcode::NOTIMP());
- }
- } else if (qtype == RRType::IXFR()) {
-
- // Can't process IXFR request
- LOG_DEBUG(resolver_logger, RESOLVER_DBG_PROCESS, RESOLVER_IXFR);
- makeErrorMessage(query_message, answer_message,
- buffer, Rcode::NOTIMP());
-
- } else if (question->getClass() != RRClass::IN()) {
-
- // Non-IN message received, refuse it.
- LOG_DEBUG(resolver_logger, RESOLVER_DBG_PROCESS, RESOLVER_NOTIN)
- .arg(question->getClass());
- makeErrorMessage(query_message, answer_message,
- buffer, Rcode::REFUSED());
- } else {
+ const ResolverImpl::NormalQueryResult result =
+ impl_->processNormalQuery(io_message, query_message,
+ answer_message, buffer, server);
+ if (result == ResolverImpl::RECURSION) {
// The RecursiveQuery object will post the "resume" event to the
// DNSServer when an answer arrives, so we don't have to do it now.
- sendAnswer = false;
- impl_->processNormalQuery(query_message, answer_message,
- buffer, server);
+ return;
+ } else if (result == ResolverImpl::DROPPED) {
+ send_answer = false;
}
}
- if (sendAnswer) {
- server->resume(true);
- }
+ server->resume(send_answer);
}
void
@@ -510,29 +502,107 @@ ResolverImpl::resolve(const QuestionPtr& question,
rec_query_->resolve(question, callback);
}
-void
-ResolverImpl::processNormalQuery(ConstMessagePtr query_message,
+ResolverImpl::NormalQueryResult
+ResolverImpl::processNormalQuery(const IOMessage& io_message,
+ MessagePtr query_message,
MessagePtr answer_message,
OutputBufferPtr buffer,
DNSServer* server)
{
+ const ConstQuestionPtr question = *query_message->beginQuestion();
+ const RRType qtype = question->getType();
+ const RRClass qclass = question->getClass();
+
+ // Apply query ACL
+ Client client(io_message);
+ const BasicAction query_action(getQueryACL().execute(client));
+ if (query_action == isc::acl::REJECT) {
+ LOG_INFO(resolver_logger, RESOLVER_QUERY_REJECTED)
+ .arg(question->getName()).arg(qtype).arg(qclass).arg(client);
+ makeErrorMessage(query_message, answer_message, buffer,
+ Rcode::REFUSED());
+ return (ERROR);
+ } else if (query_action == isc::acl::DROP) {
+ LOG_INFO(resolver_logger, RESOLVER_QUERY_DROPPED)
+ .arg(question->getName()).arg(qtype).arg(qclass).arg(client);
+ return (DROPPED);
+ }
+ LOG_DEBUG(resolver_logger, RESOLVER_DBG_IO, RESOLVER_QUERY_ACCEPTED)
+ .arg(question->getName()).arg(qtype).arg(question->getClass())
+ .arg(client);
+
+ // ACL passed. Reject inappropriate queries for the resolver.
+ if (qtype == RRType::AXFR()) {
+ if (io_message.getSocket().getProtocol() == IPPROTO_UDP) {
+ // Can't process AXFR request received over UDP
+ LOG_DEBUG(resolver_logger, RESOLVER_DBG_PROCESS, RESOLVER_AXFR_UDP);
+ makeErrorMessage(query_message, answer_message, buffer,
+ Rcode::FORMERR());
+ } else {
+ // ... or over TCP for that matter
+ LOG_DEBUG(resolver_logger, RESOLVER_DBG_PROCESS, RESOLVER_AXFR_TCP);
+ makeErrorMessage(query_message, answer_message, buffer,
+ Rcode::NOTIMP());
+ }
+ return (ERROR);
+ } else if (qtype == RRType::IXFR()) {
+ // Can't process IXFR request
+ LOG_DEBUG(resolver_logger, RESOLVER_DBG_PROCESS, RESOLVER_IXFR);
+ makeErrorMessage(query_message, answer_message, buffer,
+ Rcode::NOTIMP());
+ return (ERROR);
+ } else if (qclass != RRClass::IN()) {
+ // Non-IN message received, refuse it.
+ LOG_DEBUG(resolver_logger, RESOLVER_DBG_PROCESS, RESOLVER_NON_IN_PACKET)
+ .arg(question->getClass());
+ makeErrorMessage(query_message, answer_message, buffer,
+ Rcode::REFUSED());
+ return (ERROR);
+ }
+
+ // Everything is okay. Start resolver.
if (upstream_.empty()) {
// Processing normal query
- LOG_DEBUG(resolver_logger, RESOLVER_DBG_IO, RESOLVER_NORMQUERY);
- ConstQuestionPtr question = *query_message->beginQuestion();
+ LOG_DEBUG(resolver_logger, RESOLVER_DBG_IO, RESOLVER_NORMAL_QUERY);
rec_query_->resolve(*question, answer_message, buffer, server);
-
} else {
-
// Processing forward query
- LOG_DEBUG(resolver_logger, RESOLVER_DBG_IO, RESOLVER_FWDQUERY);
+ LOG_DEBUG(resolver_logger, RESOLVER_DBG_IO, RESOLVER_FORWARD_QUERY);
rec_query_->forward(query_message, answer_message, buffer, server);
}
+
+ return (RECURSION);
+}
+
+namespace {
+// This is a simplified ACL parser for the initial implementation with minimal
+// external dependency. For a longer term we'll switch to a more generic
+// loader allowing more complicated ACL syntax.
+shared_ptr<const Resolver::ClientACL>
+createQueryACL(isc::data::ConstElementPtr acl_config) {
+ if (!acl_config) {
+ return (shared_ptr<const Resolver::ClientACL>());
+ }
+
+ shared_ptr<Resolver::ClientACL> new_acl(
+ new Resolver::ClientACL(REJECT));
+ BOOST_FOREACH(ConstElementPtr rule, acl_config->listValue()) {
+ ConstElementPtr action = rule->get("action");
+ ConstElementPtr from = rule->get("from");
+ if (!action || !from) {
+ isc_throw(BadValue, "query ACL misses mandatory parameter");
+ }
+ new_acl->append(shared_ptr<IPCheck<Client> >(
+ new IPCheck<Client>(from->stringValue())),
+ defaultActionLoader(action));
+ }
+ return (new_acl);
+}
}
ConstElementPtr
Resolver::updateConfig(ConstElementPtr config) {
- LOG_DEBUG(resolver_logger, RESOLVER_DBG_CONFIG, RESOLVER_CONFIGUPD)
+ LOG_DEBUG(resolver_logger, RESOLVER_DBG_CONFIG, RESOLVER_CONFIG_UPDATED)
.arg(*config);
try {
@@ -546,6 +616,8 @@ Resolver::updateConfig(ConstElementPtr config) {
ConstElementPtr listenAddressesE(config->get("listen_on"));
AddressList listenAddresses(parseAddresses(listenAddressesE,
"listen_on"));
+ shared_ptr<const ClientACL> query_acl(createQueryACL(
+ config->get("query_acl")));
bool set_timeouts(false);
int qtimeout = impl_->query_timeout_;
int ctimeout = impl_->client_timeout_;
@@ -560,7 +632,8 @@ Resolver::updateConfig(ConstElementPtr config) {
// check for us
qtimeout = qtimeoutE->intValue();
if (qtimeout < -1) {
- LOG_ERROR(resolver_logger, RESOLVER_QUTMOSMALL).arg(qtimeout);
+ LOG_ERROR(resolver_logger, RESOLVER_QUERY_TIME_SMALL)
+ .arg(qtimeout);
isc_throw(BadValue, "Query timeout too small");
}
set_timeouts = true;
@@ -568,7 +641,8 @@ Resolver::updateConfig(ConstElementPtr config) {
if (ctimeoutE) {
ctimeout = ctimeoutE->intValue();
if (ctimeout < -1) {
- LOG_ERROR(resolver_logger, RESOLVER_CLTMOSMALL).arg(ctimeout);
+ LOG_ERROR(resolver_logger, RESOLVER_CLIENT_TIME_SMALL)
+ .arg(ctimeout);
isc_throw(BadValue, "Client timeout too small");
}
set_timeouts = true;
@@ -576,7 +650,8 @@ Resolver::updateConfig(ConstElementPtr config) {
if (ltimeoutE) {
ltimeout = ltimeoutE->intValue();
if (ltimeout < -1) {
- LOG_ERROR(resolver_logger, RESOLVER_LKTMOSMALL).arg(ltimeout);
+ LOG_ERROR(resolver_logger, RESOLVER_LOOKUP_TIME_SMALL)
+ .arg(ltimeout);
isc_throw(BadValue, "Lookup timeout too small");
}
set_timeouts = true;
@@ -586,7 +661,7 @@ Resolver::updateConfig(ConstElementPtr config) {
// _after_ the comparison (as opposed to before it for the timeouts)
// because "retries" is unsigned.
if (retriesE->intValue() < 0) {
- LOG_ERROR(resolver_logger, RESOLVER_RETRYNEG)
+ LOG_ERROR(resolver_logger, RESOLVER_NEGATIVE_RETRIES)
.arg(retriesE->intValue());
isc_throw(BadValue, "Negative number of retries");
}
@@ -600,15 +675,6 @@ Resolver::updateConfig(ConstElementPtr config) {
if (listenAddressesE) {
setListenAddresses(listenAddresses);
need_query_restart = true;
- } else {
- if (!configured_) {
- // TODO: ModuleSpec needs getDefault()
- AddressList initial_addresses;
- initial_addresses.push_back(AddressPair("127.0.0.1", 53));
- initial_addresses.push_back(AddressPair("::1", 53));
- setListenAddresses(initial_addresses);
- need_query_restart = true;
- }
}
if (forwardAddressesE) {
setForwardAddresses(forwardAddresses);
@@ -622,6 +688,9 @@ Resolver::updateConfig(ConstElementPtr config) {
setTimeouts(qtimeout, ctimeout, ltimeout, retries);
need_query_restart = true;
}
+ if (query_acl) {
+ setQueryACL(query_acl);
+ }
if (need_query_restart) {
impl_->queryShutdown();
@@ -633,7 +702,7 @@ Resolver::updateConfig(ConstElementPtr config) {
} catch (const isc::Exception& error) {
// Configuration error
- LOG_ERROR(resolver_logger, RESOLVER_CONFIGERR).arg(error.what());
+ LOG_ERROR(resolver_logger, RESOLVER_CONFIG_ERROR).arg(error.what());
return (isc::config::createAnswer(1, error.what()));
}
}
@@ -673,7 +742,7 @@ Resolver::setListenAddresses(const AddressList& addresses) {
void
Resolver::setTimeouts(int query_timeout, int client_timeout,
int lookup_timeout, unsigned retries) {
- LOG_DEBUG(resolver_logger, RESOLVER_DBG_CONFIG, RESOLVER_SETPARAM)
+ LOG_DEBUG(resolver_logger, RESOLVER_DBG_CONFIG, RESOLVER_SET_PARAMS)
.arg(query_timeout).arg(client_timeout).arg(lookup_timeout)
.arg(retries);
@@ -707,3 +776,18 @@ AddressList
Resolver::getListenAddresses() const {
return (impl_->listen_);
}
+
+const Resolver::ClientACL&
+Resolver::getQueryACL() const {
+ return (impl_->getQueryACL());
+}
+
+void
+Resolver::setQueryACL(shared_ptr<const ClientACL> new_acl) {
+ if (!new_acl) {
+ isc_throw(InvalidParameter, "NULL pointer is passed to setQueryACL");
+ }
+
+ LOG_INFO(resolver_logger, RESOLVER_SET_QUERY_ACL);
+ impl_->setQueryACL(new_acl);
+}
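
To make the new query ACL interface concrete, the following is a minimal test-style sketch that installs an ACL equivalent to the query_acl default added to resolver.spec.pre.in below, using Resolver::setQueryACL(). It relies only on names that appear in this commit (Resolver::ClientACL, IPCheck<Client>, ACL::append, setQueryACL); the include path for IPCheck and the helper function itself are assumptions.

    // Hypothetical helper: accept queries from the loopback addresses and
    // reject everything else (REJECT is the default action given to the
    // ACL constructor).
    #include <boost/shared_ptr.hpp>

    #include <acl/acl.h>
    #include <acl/ip_check.h>            // assumed header for IPCheck<>
    #include <server_common/client.h>

    #include "resolver.h"

    using isc::acl::ACCEPT;
    using isc::acl::IPCheck;
    using isc::acl::REJECT;
    using isc::server_common::Client;

    void
    installLoopbackOnlyACL(Resolver& resolver) {
        boost::shared_ptr<Resolver::ClientACL> acl(
            new Resolver::ClientACL(REJECT));
        acl->append(boost::shared_ptr<IPCheck<Client> >(
                        new IPCheck<Client>("127.0.0.1")), ACCEPT);
        acl->append(boost::shared_ptr<IPCheck<Client> >(
                        new IPCheck<Client>("::1")), ACCEPT);
        resolver.setQueryACL(acl);   // throws InvalidParameter if acl is NULL
    }

The two default ACCEPT rules in the spec fragment would produce an equivalent ACL when run through createQueryACL() above.
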
diff --git a/src/bin/resolver/resolver.h b/src/bin/resolver/resolver.h
index 2890dd3..9c78126 100644
--- a/src/bin/resolver/resolver.h
+++ b/src/bin/resolver/resolver.h
@@ -19,6 +19,10 @@
#include <vector>
#include <utility>
+#include <boost/shared_ptr.hpp>
+
+#include <acl/acl.h>
+
#include <cc/data.h>
#include <config/ccsession.h>
#include <dns/message.h>
@@ -37,6 +41,12 @@
#include <resolve/resolver_interface.h>
+namespace isc {
+namespace server_common {
+class Client;
+}
+}
+
class ResolverImpl;
/**
@@ -236,6 +246,27 @@ public:
*/
int getRetries() const;
+ // Shortcut typedef used for query ACL.
+ typedef isc::acl::ACL<isc::server_common::Client> ClientACL;
+
+ /// Get the query ACL.
+ ///
+ /// \exception None
+ const ClientACL& getQueryACL() const;
+
+ /// Set the new query ACL.
+ ///
+ /// This method replaces the existing query ACL completely.
+ /// Normally this method will be called via the configuration handler,
+ /// but is publicly available for convenience of tests (and other
+ /// experimental purposes).
+ /// \c new_acl must not be a NULL pointer.
+ ///
+ /// \exception InvalidParameter The given pointer is NULL
+ ///
+ /// \param new_acl The new ACL to replace the existing one.
+ void setQueryACL(boost::shared_ptr<const ClientACL> new_acl);
+
private:
ResolverImpl* impl_;
isc::asiodns::DNSService* dnss_;
diff --git a/src/bin/resolver/resolver.spec.pre.in b/src/bin/resolver/resolver.spec.pre.in
index 9df1e75..076ef85 100644
--- a/src/bin/resolver/resolver.spec.pre.in
+++ b/src/bin/resolver/resolver.spec.pre.in
@@ -113,6 +113,41 @@
}
]
}
+ },
+ {
+ "item_name": "query_acl",
+ "item_type": "list",
+ "item_optional": false,
+ "item_default": [
+ {
+ "action": "ACCEPT",
+ "from": "127.0.0.1"
+ },
+ {
+ "action": "ACCEPT",
+ "from": "::1"
+ }
+ ],
+ "list_item_spec": {
+ "item_name": "rule",
+ "item_type": "map",
+ "item_optional": false,
+ "item_default": {},
+ "map_item_spec": [
+ {
+ "item_name": "action",
+ "item_type": "string",
+ "item_optional": false,
+ "item_default": ""
+ },
+ {
+ "item_name": "from",
+ "item_type": "string",
+ "item_optional": false,
+ "item_default": ""
+ }
+ ]
+ }
}
],
"commands": [
diff --git a/src/bin/resolver/resolver_log.h b/src/bin/resolver/resolver_log.h
index 63f6abb..8378b98 100644
--- a/src/bin/resolver/resolver_log.h
+++ b/src/bin/resolver/resolver_log.h
@@ -16,7 +16,7 @@
#define __RESOLVER_LOG__H
#include <log/macros.h>
-#include "resolverdef.h"
+#include "resolver_messages.h"
/// \brief Resolver Logging
///
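
The rename only changes where the generated code comes from: the Makefile.am rule above compiles resolver_messages.mes into resolver_messages.{h,cc}, and resolver_log.h now includes the result. A minimal sketch of how a caller consumes it, assuming (as the existing LOG_* calls imply) that the message compiler emits one global MessageID per "% ..." entry:

    // Illustrative only; resolver_logger and the RESOLVER_DBG_* levels come
    // from resolver_log.h, the message IDs from the generated header.
    #include <cstddef>
    #include <string>

    #include "resolver_log.h"

    void
    logResponseSent(std::size_t length, const std::string& rendered_message) {
        // Fills the %1 (byte count) and %2 (message text) placeholders of
        // RESOLVER_DNS_MESSAGE_SENT at the "detail" debug level.
        LOG_DEBUG(resolver_logger, RESOLVER_DBG_DETAIL, RESOLVER_DNS_MESSAGE_SENT)
            .arg(length).arg(rendered_message);
    }
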
diff --git a/src/bin/resolver/resolver_messages.mes b/src/bin/resolver/resolver_messages.mes
new file mode 100644
index 0000000..6c5be64
--- /dev/null
+++ b/src/bin/resolver/resolver_messages.mes
@@ -0,0 +1,219 @@
+# Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+# No namespace declaration - these constants go in the global namespace
+# along with the resolver methods.
+
+% RESOLVER_AXFR_TCP AXFR request received over TCP
+A debug message, the resolver received an AXFR request over TCP. The server
+cannot process it and will return an error message to the sender with the
+RCODE set to NOTIMP.
+
+% RESOLVER_AXFR_UDP AXFR request received over UDP
+A debug message, the resolver received an AXFR request over UDP. The server
+cannot process it (and in any case, an AXFR request should be sent over TCP)
+and will return an error message to the sender with the RCODE set to FORMERR.
+
+% RESOLVER_CLIENT_TIME_SMALL client timeout of %1 is too small
+An error indicating that the configuration value specified for the client
+timeout is too small.
+
+% RESOLVER_CONFIG_CHANNEL configuration channel created
+A debug message, output when the resolver has successfully established a
+connection to the configuration channel.
+
+% RESOLVER_CONFIG_ERROR error in configuration: %1
+An error was detected in a configuration update received by the resolver. This
+may be in the format of the configuration message (in which case this is a
+programming error) or it may be in the data supplied (in which case it is
+a user error). The reason for the error, given as a parameter in the message,
+will give more details.
+
+% RESOLVER_CONFIG_LOADED configuration loaded
+A debug message, output when the resolver configuration has been successfully
+loaded.
+
+% RESOLVER_CONFIG_UPDATED configuration updated: %1
+A debug message, the configuration has been updated with the specified
+information.
+
+% RESOLVER_CREATED main resolver object created
+A debug message, output when the Resolver() object has been created.
+
+% RESOLVER_DNS_MESSAGE_RECEIVED DNS message received: %1
+A debug message, this always precedes some other logging message and is the
+formatted contents of the DNS packet that the other message refers to.
+
+% RESOLVER_DNS_MESSAGE_SENT DNS message of %1 bytes sent: %2
+A debug message, this contains details of the response sent back to the querying
+system.
+
+% RESOLVER_FAILED resolver failed, reason: %1
+This is an error message output when an unhandled exception is caught by the
+resolver. All it can do is to shut down.
+
+% RESOLVER_FORWARD_ADDRESS setting forward address %1(%2)
+This message may appear multiple times during startup, and it lists the
+forward addresses used by the resolver when running in forwarding mode.
+
+% RESOLVER_FORWARD_QUERY processing forward query
+The received query has passed all checks and is being forwarded to upstream
+servers.
+
+% RESOLVER_HEADER_ERROR message received, exception when processing header: %1
+A debug message noting that an exception occurred during the processing of
+a received packet. The packet has been dropped.
+
+% RESOLVER_IXFR IXFR request received
+The resolver received an IXFR request. The server cannot process it
+and will return an error message to the sender with the RCODE set to NOTIMP.
+
+% RESOLVER_LOOKUP_TIME_SMALL lookup timeout of %1 is too small
+An error indicating that the configuration value specified for the lookup
+timeout is too small.
+
+% RESOLVER_MESSAGE_ERROR error parsing received message: %1 - returning %2
+A debug message noting that the resolver received a message and the
+parsing of the body of the message failed due to some error (although
+the parsing of the header succeeded). The message parameters give a
+textual description of the problem and the RCODE returned.
+
+% RESOLVER_NEGATIVE_RETRIES negative number of retries (%1) specified in the configuration
+An error message indicating that the resolver configuration has specified a
+negative retry count. Only zero or positive values are valid.
+
+% RESOLVER_NON_IN_PACKET non-IN class request received, returning REFUSED message
+A debug message, the resolver has received a DNS packet that was not IN class.
+The resolver cannot handle such packets, so is returning a REFUSED response to
+the sender.
+
+% RESOLVER_NORMAL_QUERY processing normal query
+The received query has passed all checks and is being processed by the resolver.
+
+% RESOLVER_NOTIFY_RECEIVED NOTIFY arrived but server is not authoritative
+The resolver received a NOTIFY message. As the server is not authoritative it
+cannot process it, so it returns an error message to the sender with the RCODE
+set to NOTAUTH.
+
+% RESOLVER_NOT_ONE_QUESTION query contained %1 questions, exactly one question was expected
+A debug message, the resolver received a query that contained the number of
+entries in the question section detailed in the message. This is a malformed
+message, as a DNS query must contain only one question. The resolver will
+return a message to the sender with the RCODE set to FORMERR.
+
+% RESOLVER_NO_ROOT_ADDRESS no root addresses available
+A warning message during startup, indicates that no root addresses have been
+set. This may be because the resolver will get them from a priming query.
+
+% RESOLVER_PARSE_ERROR error parsing received message: %1 - returning %2
+A debug message noting that the resolver received a message and the parsing
+of the body of the message failed due to some non-protocol related reason
+(although the parsing of the header succeeded). The message parameters give
+a textual description of the problem and the RCODE returned.
+
+% RESOLVER_PRINT_COMMAND print message command, arguments are: %1
+This message is logged when a "print_message" command is received over the
+command channel.
+
+% RESOLVER_PROTOCOL_ERROR protocol error parsing received message: %1 - returning %2
+A debug message noting that the resolver received a message and the parsing
+of the body of the message failed due to some protocol error (although the
+parsing of the header succeeded). The message parameters give a textual
+description of the problem and the RCODE returned.
+
+% RESOLVER_QUERY_SETUP query setup
+A debug message noting that the resolver is creating a RecursiveQuery object.
+
+% RESOLVER_QUERY_SHUTDOWN query shutdown
+A debug message noting that the resolver is destroying a RecursiveQuery object.
+
+% RESOLVER_QUERY_TIME_SMALL query timeout of %1 is too small
+An error indicating that the configuration value specified for the query
+timeout is too small.
+
+% RESOLVER_RECEIVED_MESSAGE resolver has received a DNS message
+A debug message indicating that the resolver has received a message. Depending
+on the debug settings, subsequent log output will indicate the nature of the
+message.
+
+% RESOLVER_RECURSIVE running in recursive mode
+This is an informational message that appears at startup noting that the
+resolver is running in recursive mode.
+
+% RESOLVER_SERVICE_CREATED service object created
+A debug message, output when the main service object (which handles the
+received queries) is created.
+
+% RESOLVER_SET_PARAMS query timeout: %1, client timeout: %2, lookup timeout: %3, retry count: %4
+A debug message, lists the parameters being set for the resolver. These are:
+query timeout: the timeout (in ms) used for queries originated by the resolver
+to upstream servers. Client timeout: the interval allowed for resolving a
+client query: after this time, the resolver sends back a SERVFAIL to the client
+whilst continuing to resolve the query. Lookup timeout: the time at which the
+resolver gives up trying to resolve a query. Retry count: the number of times
+the resolver will retry a query to an upstream server if it gets a timeout.
+
+The client and lookup timeouts require a bit more explanation. The
+resolution of the client query might require a large number of queries to
+upstream nameservers. Even if none of these queries times out, the total time
+taken to perform all the queries may exceed the client timeout. When this
+happens, a SERVFAIL is returned to the client, but the resolver continues
+with the resolution process. Data received is added to the cache. However,
+there comes a time - the lookup timeout - when even the resolver gives up.
+At this point it will wait for pending upstream queries to complete or
+timeout and drop the query.
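+
+As an illustration (the numbers here are invented for this example and are
+not defaults): with a client timeout of 4000 ms and a lookup timeout of
+30000 ms, a resolution that is still in progress after 4 seconds causes a
+SERVFAIL to be returned to the client immediately, while the resolver carries
+on resolving the query and caching the answers it receives; if the resolution
+still has not finished after 30 seconds, the resolver abandons it, waiting
+only for the outstanding upstream queries to complete or time out.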
+
+% RESOLVER_SET_ROOT_ADDRESS setting root address %1(%2)
+This message may appear multiple times during startup; it lists the root
+addresses used by the resolver.
+
+% RESOLVER_SHUTDOWN resolver shutdown complete
+This informational message is output when the resolver has shut down.
+
+% RESOLVER_STARTED resolver started
+This informational message is output by the resolver when all initialization
+has been completed and it is entering its main loop.
+
+% RESOLVER_STARTING starting resolver with command line '%1'
+An informational message, this is output when the resolver starts up.
+
+% RESOLVER_UNEXPECTED_RESPONSE received unexpected response, ignoring
+A debug message noting that the server has received a response instead of a
+query and is ignoring it.
+
+% RESOLVER_UNSUPPORTED_OPCODE opcode %1 not supported by the resolver
+A debug message, the resolver received a message with an unsupported opcode
+(it can only process QUERY opcodes). It will return a message to the sender
+with the RCODE set to NOTIMP.
+
+% RESOLVER_SET_QUERY_ACL query ACL is configured
+A debug message that appears when a new query ACL is configured for the
+resolver.
+
+% RESOLVER_QUERY_ACCEPTED query accepted: '%1/%2/%3' from %4
+A debug message that indicates an incoming query is accepted in terms of
+the query ACL. The log message shows the query in the form of
+<query name>/<query type>/<query class>, and the client that sent the
+query in the form of <source IP address>#<source port>.
+
+% RESOLVER_QUERY_REJECTED query rejected: '%1/%2/%3' from %4
+An informational message that indicates an incoming query is rejected
+in terms of the query ACL. This results in a response with an RCODE of
+REFUSED. See RESOLVER_QUERY_ACCEPTED for the information given in the message.
+
+% RESOLVER_QUERY_DROPPED query dropped: '%1/%2/%3' from %4
+An informational message that indicates an incoming query is dropped
+in terms of the query ACL. Unlike the RESOLVER_QUERY_REJECTED case, the server
+does not return any response. See RESOLVER_QUERY_ACCEPTED for the information
+given in the message.
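+
+As an illustration of how these three messages relate to the configuration
+(the exact schema is defined by the resolver spec file; the form below simply
+mirrors the unit tests added in this branch), a query ACL is given as a JSON
+list of rules, each with an "action" and a "from" clause, for example:
+
+  [ {"action": "ACCEPT", "from": "192.0.2.1"},
+    {"action": "DROP",   "from": "2001:db8::1"} ]
+
+A query matching the first rule is logged with RESOLVER_QUERY_ACCEPTED, one
+matching the second is silently discarded (RESOLVER_QUERY_DROPPED), and
+anything else falls through to the default rule and is rejected
+(RESOLVER_QUERY_REJECTED).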
diff --git a/src/bin/resolver/resolverdef.mes b/src/bin/resolver/resolverdef.mes
deleted file mode 100644
index 47433a4..0000000
--- a/src/bin/resolver/resolverdef.mes
+++ /dev/null
@@ -1,193 +0,0 @@
-# Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
-#
-# Permission to use, copy, modify, and/or distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
-# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
-# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
-# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
-# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
-# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
-# PERFORMANCE OF THIS SOFTWARE.
-
-$PREFIX RESOLVER_
-# No namespace declaration - these constants go in the global namespace
-# along with the resolver methods.
-
-% AXFRTCP AXFR request received over TCP
-A debug message, the resolver received a NOTIFY message over TCP. The server
-cannot process it and will return an error message to the sender with the
-RCODE set to NOTIMP.
-
-% AXFRUDP AXFR request received over UDP
-A debug message, the resolver received a NOTIFY message over UDP. The server
-cannot process it (and in any case, an AXFR request should be sent over TCP)
-and will return an error message to the sender with the RCODE set to FORMERR.
-
-% CONFIGCHAN configuration channel created
-A debug message, output when the resolver has successfully established a
-connection to the configuration channel.
-
-% CONFIGERR error in configuration: %1
-An error was detected in a configuration update received by the resolver. This
-may be in the format of the configuration message (in which case this is a
-programming error) or it may be in the data supplied (in which case it is
-a user error). The reason for the error, given as a parameter in the message,
-will give more details.
-
-% CONFIGLOAD configuration loaded
-A debug message, output when the resolver configuration has been successfully
-loaded.
-
-% CONFIGUPD configuration updated: %1
-A debug message, the configuration has been updated with the specified
-information.
-
-% DNSMSGRCVD DNS message received: %1
-A debug message, this always precedes some other logging message and is the
-formatted contents of the DNS packet that the other message refers to.
-
-% DNSMSGSENT DNS message of %1 bytes sent: %2
-A debug message, this contains details of the response sent back to the querying
-system.
-
-% CLTMOSMALL client timeout of %1 is too small
-An error indicating that the configuration value specified for the query
-timeout is too small.
-
-% CREATED main resolver object created
-A debug message, output when the Resolver() object has been created.
-
-% FAILED resolver failed, reason: %1
-This is an error message output when an unhandled exception is caught by the
-resolver. All it can do is to shut down.
-
-% FWDADDR setting forward address %1(%2)
-This message may appear multiple times during startup, and it lists the
-forward addresses used by the resolver when running in forwarding mode.
-
-% FWDQUERY processing forward query
-The received query has passed all checks and is being forwarded to upstream
-servers.
-
-% HDRERR message received, exception when processing header: %1
-A debug message noting that an exception occurred during the processing of
-a received packet. The packet has been dropped.
-
-% IXFR IXFR request received
-The resolver received a NOTIFY message over TCP. The server cannot process it
-and will return an error message to the sender with the RCODE set to NOTIMP.
-
-% LKTMOSMALL lookup timeout of %1 is too small
-An error indicating that the configuration value specified for the lookup
-timeout is too small.
-
-% NFYNOTAUTH NOTIFY arrived but server is not authoritative
-The resolver received a NOTIFY message. As the server is not authoritative it
-cannot process it, so it returns an error message to the sender with the RCODE
-set to NOTAUTH.
-
-% NORMQUERY processing normal query
-The received query has passed all checks and is being processed by the resolver.
-
-% NOTIN non-IN class request received, returning REFUSED message
-A debug message, the resolver has received a DNS packet that was not IN class.
-The resolver cannot handle such packets, so is returning a REFUSED response to
-the sender.
-
-% NOROOTADDR no root addresses available
-A warning message during startup, indicates that no root addresses have been
-set. This may be because the resolver will get them from a priming query.
-
-% NOTONEQUES query contained %1 questions, exactly one question was expected
-A debug message, the resolver received a query that contained the number of
-entires in the question section detailed in the message. This is a malformed
-message, as a DNS query must contain only one question. The resolver will
-return a message to the sender with the RCODE set to FORMERR.
-
-% OPCODEUNS opcode %1 not supported by the resolver
-A debug message, the resolver received a message with an unsupported opcode
-(it can only process QUERY opcodes). It will return a message to the sender
-with the RCODE set to NOTIMP.
-
-% PARSEERR error parsing received message: %1 - returning %2
-A debug message noting that the resolver received a message and the parsing
-of the body of the message failed due to some non-protocol related reason
-(although the parsing of the header succeeded). The message parameters give
-a textual description of the problem and the RCODE returned.
-
-% PRINTMSG print message command, arguments are: %1
-This message is logged when a "print_message" command is received over the
-command channel.
-
-% PROTERR protocol error parsing received message: %1 - returning %2
-A debug message noting that the resolver received a message and the parsing
-of the body of the message failed due to some protocol error (although the
-parsing of the header succeeded). The message parameters give a textual
-description of the problem and the RCODE returned.
-
-% QUSETUP query setup
-A debug message noting that the resolver is creating a RecursiveQuery object.
-
-% QUSHUT query shutdown
-A debug message noting that the resolver is destroying a RecursiveQuery object.
-
-% QUTMOSMALL query timeout of %1 is too small
-An error indicating that the configuration value specified for the query
-timeout is too small.
-
-% RECURSIVE running in recursive mode
-This is an informational message that appears at startup noting that the
-resolver is running in recursive mode.
-
-% RECVMSG resolver has received a DNS message
-A debug message indicating that the resolver has received a message. Depending
-on the debug settings, subsequent log output will indicate the nature of the
-message.
-
-% RETRYNEG negative number of retries (%1) specified in the configuration
-An error message indicating that the resolver configuration has specified a
-negative retry count. Only zero or positive values are valid.
-
-% ROOTADDR setting root address %1(%2)
-This message may appear multiple times during startup; it lists the root
-addresses used by the resolver.
-
-% SERVICE service object created
-A debug message, output when the main service object (which handles the
-received queries) is created.
-
-% SETPARAM query timeout: %1, client timeout: %2, lookup timeout: %3, retry count: %4
-A debug message, lists the parameters associated with the message. These are:
-query timeout: the timeout (in ms) used for queries originated by the resolver
-to upstream servers. Client timeout: the interval to resolver a query by
-a client: after this time, the resolver sends back a SERVFAIL to the client
-whilst continuing to resolver the query. Lookup timeout: the time at which the
-resolver gives up trying to resolve a query. Retry count: the number of times
-the resolver will retry a query to an upstream server if it gets a timeout.
-
-The client and lookup timeouts require a bit more explanation. The
-resolution of the client query might require a large number of queries to
-upstream nameservers. Even if none of these queries timeout, the total time
-taken to perform all the queries may exceed the client timeout. When this
-happens, a SERVFAIL is returned to the client, but the resolver continues
-with the resolution process. Data received is added to the cache. However,
-there comes a time - the lookup timeout - when even the resolve gives up.
-At this point it will wait for pending upstream queries to complete or
-timeout and drop the query.
-
-% SHUTDOWN resolver shutdown complete
-This information message is output when the resolver has shut down.
-
-% STARTED resolver started
-This informational message is output by the resolver when all initialization
-has been completed and it is entering its main loop.
-
-% STARTING starting resolver with command line '%1'
-An informational message, this is output when the resolver starts up.
-
-% UNEXRESP received unexpected response, ignoring
-A debug message noting that the server has received a response instead of a
-query and is ignoring it.
diff --git a/src/bin/resolver/tests/Makefile.am b/src/bin/resolver/tests/Makefile.am
index 35b5398..c519617 100644
--- a/src/bin/resolver/tests/Makefile.am
+++ b/src/bin/resolver/tests/Makefile.am
@@ -28,7 +28,7 @@ run_unittests_SOURCES += resolver_config_unittest.cc
run_unittests_SOURCES += response_scrubber_unittest.cc
run_unittests_SOURCES += run_unittests.cc
-nodist_run_unittests_SOURCES = ../resolverdef.h ../resolverdef.cc
+nodist_run_unittests_SOURCES = ../resolver_messages.h ../resolver_messages.cc
run_unittests_CPPFLAGS = $(AM_CPPFLAGS) $(GTEST_INCLUDES)
run_unittests_LDFLAGS = $(AM_LDFLAGS) $(GTEST_LDFLAGS)
@@ -47,6 +47,8 @@ run_unittests_LDADD += $(top_builddir)/src/lib/server_common/libserver_common.la
run_unittests_LDADD += $(top_builddir)/src/lib/cache/libcache.la
run_unittests_LDADD += $(top_builddir)/src/lib/nsas/libnsas.la
run_unittests_LDADD += $(top_builddir)/src/lib/resolve/libresolve.la
+run_unittests_LDADD += $(top_builddir)/src/lib/acl/libacl.la
+run_unittests_LDADD += $(top_builddir)/src/lib/util/libutil.la
run_unittests_LDADD += $(top_builddir)/src/lib/util/unittests/libutil_unittests.la
# Note the ordering matters: -Wno-... must follow -Wextra (defined in
diff --git a/src/bin/resolver/tests/resolver_config_unittest.cc b/src/bin/resolver/tests/resolver_config_unittest.cc
index 70e856d..9006301 100644
--- a/src/bin/resolver/tests/resolver_config_unittest.cc
+++ b/src/bin/resolver/tests/resolver_config_unittest.cc
@@ -16,12 +16,23 @@
#include <string>
+#include <boost/scoped_ptr.hpp>
+
#include <gtest/gtest.h>
#include <cc/data.h>
+#include <config/ccsession.h>
+
#include <asiodns/asiodns.h>
#include <asiolink/asiolink.h>
+#include <asiolink/io_address.h>
+#include <asiolink/io_socket.h>
+#include <asiolink/io_message.h>
+
+#include <acl/acl.h>
+
+#include <server_common/client.h>
#include <resolver/resolver.h>
@@ -30,25 +41,37 @@
#include <testutils/portconfig.h>
using namespace std;
+using boost::scoped_ptr;
+using namespace isc::acl;
using namespace isc::data;
using namespace isc::testutils;
using namespace isc::asiodns;
using namespace isc::asiolink;
+using namespace isc::server_common;
using isc::UnitTestUtil;
namespace {
class ResolverConfig : public ::testing::Test {
- public:
- IOService ios;
- DNSService dnss;
- Resolver server;
- ResolverConfig() :
- dnss(ios, NULL, NULL, NULL)
- {
- server.setDNSService(dnss);
- server.setConfigured();
- }
- void invalidTest(const string &JSON, const string& name);
+protected:
+ IOService ios;
+ DNSService dnss;
+ Resolver server;
+ scoped_ptr<const IOEndpoint> endpoint;
+ scoped_ptr<const IOMessage> request;
+ scoped_ptr<const Client> client;
+ ResolverConfig() : dnss(ios, NULL, NULL, NULL) {
+ server.setDNSService(dnss);
+ server.setConfigured();
+ }
+ const Client& createClient(const string& source_addr) {
+ endpoint.reset(IOEndpoint::create(IPPROTO_UDP, IOAddress(source_addr),
+ 53210));
+ request.reset(new IOMessage(NULL, 0, IOSocket::getDummyUDPSocket(),
+ *endpoint));
+ client.reset(new Client(*request));
+ return (*client);
+ }
+ void invalidTest(const string &JSON, const string& name);
};
TEST_F(ResolverConfig, forwardAddresses) {
@@ -228,4 +251,128 @@ TEST_F(ResolverConfig, invalidTimeoutsConfig) {
"}", "Negative number of retries");
}
+TEST_F(ResolverConfig, defaultQueryACL) {
+ // If no configuration is loaded, the default ACL should reject everything.
+ EXPECT_EQ(REJECT, server.getQueryACL().execute(createClient("192.0.2.1")));
+ EXPECT_EQ(REJECT, server.getQueryACL().execute(
+ createClient("2001:db8::1")));
+
+ // The following would be allowed if the server had loaded the default
+    // configuration from the spec file. In this test context that has not
+    // happened, so they should be rejected just like the cases above.
+ EXPECT_EQ(REJECT, server.getQueryACL().execute(createClient("127.0.0.1")));
+ EXPECT_EQ(REJECT, server.getQueryACL().execute(createClient("::1")));
+}
+
+TEST_F(ResolverConfig, emptyQueryACL) {
+ // Explicitly configured empty ACL should have the same effect.
+ ElementPtr config(Element::fromJSON("{ \"query_acl\": [] }"));
+ ConstElementPtr result(server.updateConfig(config));
+ EXPECT_EQ(result->toWire(), isc::config::createAnswer()->toWire());
+ EXPECT_EQ(REJECT, server.getQueryACL().execute(createClient("192.0.2.1")));
+ EXPECT_EQ(REJECT, server.getQueryACL().execute(
+ createClient("2001:db8::1")));
+}
+
+TEST_F(ResolverConfig, queryACLIPv4) {
+ // A simple "accept" query for a specific IPv4 address
+ ElementPtr config(Element::fromJSON(
+ "{ \"query_acl\": "
+ " [ {\"action\": \"ACCEPT\","
+ " \"from\": \"192.0.2.1\"} ] }"));
+ ConstElementPtr result(server.updateConfig(config));
+ EXPECT_EQ(result->toWire(), isc::config::createAnswer()->toWire());
+ EXPECT_EQ(ACCEPT, server.getQueryACL().execute(createClient("192.0.2.1")));
+ EXPECT_EQ(REJECT, server.getQueryACL().execute(
+ createClient("2001:db8::1")));
+}
+
+TEST_F(ResolverConfig, queryACLIPv6) {
+ // same for IPv6
+ ElementPtr config(Element::fromJSON(
+ "{ \"query_acl\": "
+ " [ {\"action\": \"ACCEPT\","
+ " \"from\": \"2001:db8::1\"} ] }"));
+ ConstElementPtr result(server.updateConfig(config));
+ EXPECT_EQ(result->toWire(), isc::config::createAnswer()->toWire());
+ EXPECT_EQ(REJECT, server.getQueryACL().execute(createClient("192.0.2.1")));
+ EXPECT_EQ(ACCEPT, server.getQueryACL().execute(
+ createClient("2001:db8::1")));
+}
+
+TEST_F(ResolverConfig, multiEntryACL) {
+ // A bit more complicated one: mixture of IPv4 and IPv6 with 3 rules
+    // in total. We shouldn't have to check many variations of rules here,
+    // as that should have been tested in the underlying ACL module. All we
+    // need to check is that a reasonably complicated ACL configuration is
+    // loaded as expected.
+ ElementPtr config(Element::fromJSON(
+ "{ \"query_acl\": "
+ " [ {\"action\": \"ACCEPT\","
+ " \"from\": \"192.0.2.1\"},"
+ " {\"action\": \"REJECT\","
+ " \"from\": \"192.0.2.0/24\"},"
+ " {\"action\": \"DROP\","
+ " \"from\": \"2001:db8::1\"},"
+ "] }"));
+ ConstElementPtr result(server.updateConfig(config));
+ EXPECT_EQ(result->toWire(), isc::config::createAnswer()->toWire());
+ EXPECT_EQ(ACCEPT, server.getQueryACL().execute(createClient("192.0.2.1")));
+ EXPECT_EQ(REJECT, server.getQueryACL().execute(createClient("192.0.2.2")));
+ EXPECT_EQ(DROP, server.getQueryACL().execute(
+ createClient("2001:db8::1")));
+ EXPECT_EQ(REJECT, server.getQueryACL().execute(
+ createClient("2001:db8::2"))); // match the default rule
+}
+
+int
+getResultCode(ConstElementPtr result) {
+ int rcode;
+ isc::config::parseAnswer(rcode, result);
+ return (rcode);
+}
+
+TEST_F(ResolverConfig, badQueryACL) {
+ // Most of these cases shouldn't happen in practice because the syntax
+    // check should be performed before updateConfig(). But we check that at
+    // least the server code won't crash even if unexpected input is given.
+
+ // ACL must be a list
+ EXPECT_EQ(1, getResultCode(
+ server.updateConfig(
+ Element::fromJSON("{ \"query_acl\": 1 }"))));
+ // Each rule must have "action" and "from"
+ EXPECT_EQ(1, getResultCode(
+ server.updateConfig(
+ Element::fromJSON("{ \"query_acl\":"
+ " [ {\"from\": \"192.0.2.1\"} ] }"))));
+ EXPECT_EQ(1, getResultCode(
+ server.updateConfig(
+ Element::fromJSON("{ \"query_acl\":"
+ " [ {\"action\": \"DROP\"} ] }"))));
+ // invalid "action"
+ EXPECT_EQ(1, getResultCode(
+ server.updateConfig(
+ Element::fromJSON("{ \"query_acl\":"
+ " [ {\"action\": 1,"
+ " \"from\": \"192.0.2.1\"}]}"))));
+ EXPECT_EQ(1, getResultCode(
+ server.updateConfig(
+ Element::fromJSON("{ \"query_acl\":"
+ " [ {\"action\": \"BADACTION\","
+ " \"from\": \"192.0.2.1\"}]}"))));
+
+ // invalid "from"
+ EXPECT_EQ(1, getResultCode(
+ server.updateConfig(
+ Element::fromJSON("{ \"query_acl\":"
+ " [ {\"action\": \"ACCEPT\","
+ " \"from\": 53}]}"))));
+ EXPECT_EQ(1, getResultCode(
+ server.updateConfig(
+ Element::fromJSON("{ \"query_acl\":"
+ " [ {\"action\": \"ACCEPT\","
+ " \"from\": \"1922.0.2.1\"}]}"))));
+}
+
}
diff --git a/src/bin/resolver/tests/resolver_unittest.cc b/src/bin/resolver/tests/resolver_unittest.cc
index 97edf12..9bcc261 100644
--- a/src/bin/resolver/tests/resolver_unittest.cc
+++ b/src/bin/resolver/tests/resolver_unittest.cc
@@ -12,14 +12,21 @@
// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
// PERFORMANCE OF THIS SOFTWARE.
+#include <string>
+
+#include <exceptions/exceptions.h>
+
#include <dns/name.h>
+#include <cc/data.h>
#include <resolver/resolver.h>
#include <dns/tests/unittest_util.h>
#include <testutils/dnsmessage_test.h>
#include <testutils/srv_test.h>
+using namespace std;
using namespace isc::dns;
+using namespace isc::data;
using namespace isc::testutils;
using isc::UnitTestUtil;
@@ -28,7 +35,17 @@ const char* const TEST_PORT = "53535";
class ResolverTest : public SrvTestBase{
protected:
- ResolverTest() : server(){}
+ ResolverTest() : server() {
+ // By default queries from the "default remote address" will be
+ // rejected, so we'll need to add an explicit ACL entry to allow that.
+ server.setConfigured();
+ server.updateConfig(Element::fromJSON(
+ "{ \"query_acl\": "
+ " [ {\"action\": \"ACCEPT\","
+ " \"from\": \"" +
+ string(DEFAULT_REMOTE_ADDRESS) +
+ "\"} ] }"));
+ }
virtual void processMessage() {
server.processMessage(*io_message,
parse_message,
@@ -136,4 +153,46 @@ TEST_F(ResolverTest, notifyFail) {
Opcode::NOTIFY().getCode(), QR_FLAG, 0, 0, 0, 0);
}
+TEST_F(ResolverTest, setQueryACL) {
+ // valid cases are tested through other tests. We only explicitly check
+ // an invalid case: passing a NULL shared pointer.
+ EXPECT_THROW(server.setQueryACL(
+ boost::shared_ptr<const Resolver::ClientACL>()),
+ isc::InvalidParameter);
+}
+
+TEST_F(ResolverTest, queryACL) {
+ // The "ACCEPT" cases are covered in other tests. Here we explicitly
+ // test "REJECT" and "DROP" cases.
+
+    // Clear the existing ACL, reverting to the "default reject" rule.
+    server.updateConfig(Element::fromJSON("{ \"query_acl\": [] }"));
+
+    // AXFR over UDP. This would otherwise result in FORMERR.
+ UnitTestUtil::createRequestMessage(request_message, opcode, default_qid,
+ Name("example.com"), RRClass::IN(),
+ RRType::AXFR());
+ createRequestPacket(request_message, IPPROTO_UDP);
+ server.processMessage(*io_message, parse_message, response_message,
+ response_obuffer, &dnsserv);
+ EXPECT_TRUE(dnsserv.hasAnswer());
+ headerCheck(*parse_message, default_qid, Rcode::REFUSED(),
+ Opcode::QUERY().getCode(), QR_FLAG, 1, 0, 0, 0);
+
+ // Same query, but with an explicit "DROP" ACL entry. There should be
+ // no response.
+ server.updateConfig(Element::fromJSON("{ \"query_acl\": "
+ " [ {\"action\": \"DROP\","
+ " \"from\": \"" +
+ string(DEFAULT_REMOTE_ADDRESS) +
+ "\"} ] }"));
+ parse_message->clear(Message::PARSE);
+ response_message->clear(Message::RENDER);
+ response_obuffer->clear();
+ server.processMessage(*io_message, parse_message, response_message,
+ response_obuffer, &dnsserv);
+ EXPECT_FALSE(dnsserv.hasAnswer());
+}
+
}
diff --git a/src/bin/resolver/tests/response_scrubber_unittest.cc b/src/bin/resolver/tests/response_scrubber_unittest.cc
index eff5598..1570def 100644
--- a/src/bin/resolver/tests/response_scrubber_unittest.cc
+++ b/src/bin/resolver/tests/response_scrubber_unittest.cc
@@ -68,6 +68,12 @@ public:
return address_.getFamily();
}
+    // This is a dummy that is never used; it is defined just to keep the
+    // build working.
+ virtual const struct sockaddr& getSockAddr() const {
+ static struct sockaddr sa;
+ return (sa);
+ }
+
private:
IOAddress address_; // Address of endpoint
uint16_t port_; // Port number of endpoint
diff --git a/src/bin/xfrin/Makefile.am b/src/bin/xfrin/Makefile.am
index 8a29949..0af9be6 100644
--- a/src/bin/xfrin/Makefile.am
+++ b/src/bin/xfrin/Makefile.am
@@ -6,12 +6,13 @@ pkglibexec_SCRIPTS = b10-xfrin
b10_xfrindir = $(pkgdatadir)
b10_xfrin_DATA = xfrin.spec
+pyexec_DATA = xfrin_messages.py
-CLEANFILES = b10-xfrin xfrin.pyc
+CLEANFILES = b10-xfrin xfrin.pyc xfrinlog.py xfrin_messages.py xfrin_messages.pyc
man_MANS = b10-xfrin.8
EXTRA_DIST = $(man_MANS) b10-xfrin.xml
-EXTRA_DIST += xfrin.spec
+EXTRA_DIST += xfrin.spec xfrin_messages.mes
if ENABLE_MAN
@@ -20,8 +21,12 @@ b10-xfrin.8: b10-xfrin.xml
endif
+# Define rule to build logging source files from message file
+xfrin_messages.py: xfrin_messages.mes
+ $(top_builddir)/src/lib/log/compiler/message -p $(top_srcdir)/src/bin/xfrin/xfrin_messages.mes
+
# this is done here since configure.ac AC_OUTPUT doesn't expand exec_prefix
-b10-xfrin: xfrin.py
+b10-xfrin: xfrin.py xfrin_messages.py
$(SED) -e "s|@@PYTHONPATH@@|@pyexecdir@|" \
-e "s|@@LOCALSTATEDIR@@|$(localstatedir)|" xfrin.py >$@
chmod a+x $@
diff --git a/src/bin/xfrin/xfrin.py.in b/src/bin/xfrin/xfrin.py.in
index a9ca0f2..64e3563 100755
--- a/src/bin/xfrin/xfrin.py.in
+++ b/src/bin/xfrin/xfrin.py.in
@@ -29,12 +29,17 @@ from isc.config.ccsession import *
from isc.notify import notify_out
import isc.util.process
import isc.net.parse
+from xfrin_messages import *
+
+isc.log.init("b10-xfrin")
+logger = isc.log.Logger("xfrin")
+
try:
from pydnspp import *
except ImportError as e:
# C++ loadable module may not be installed; even so the xfrin process
# must keep running, so we warn about it and move forward.
- sys.stderr.write('[b10-xfrin] failed to import DNS module: %s\n' % str(e))
+ logger.error(XFRIN_IMPORT_DNS, str(e))
isc.util.process.rename()
@@ -69,9 +74,6 @@ __version__ = 'BIND10'
XFRIN_OK = 0
XFRIN_FAIL = 1
-def log_error(msg):
- sys.stderr.write("[b10-xfrin] %s\n" % str(msg))
-
class XfrinException(Exception):
pass
@@ -150,8 +152,7 @@ class XfrinConnection(asyncore.dispatcher):
self.connect(self._master_address)
return True
except socket.error as e:
- self.log_msg('Failed to connect:(%s), %s' % (self._master_address,
- str(e)))
+            logger.error(XFRIN_CONNECT_MASTER, self._master_address, str(e))
return False
def _create_query(self, query_type):
@@ -264,31 +265,27 @@ class XfrinConnection(asyncore.dispatcher):
logstr = 'SOA check for \'%s\' ' % self._zone_name
ret = self._check_soa_serial()
- logstr = 'transfer of \'%s\': AXFR ' % self._zone_name
if ret == XFRIN_OK:
- self.log_msg(logstr + 'started')
+ logger.info(XFRIN_AXFR_TRANSFER_STARTED, self._zone_name)
self._send_query(RRType.AXFR())
isc.datasrc.sqlite3_ds.load(self._db_file, self._zone_name,
self._handle_xfrin_response)
- self.log_msg(logstr + 'succeeded')
+ logger.info(XFRIN_AXFR_TRANSFER_SUCCESS, self._zone_name)
except XfrinException as e:
- self.log_msg(e)
- self.log_msg(logstr + 'failed')
+ logger.error(XFRIN_AXFR_TRANSFER_FAILURE, self._zone_name, str(e))
ret = XFRIN_FAIL
#TODO, recover data source.
except isc.datasrc.sqlite3_ds.Sqlite3DSError as e:
- self.log_msg(e)
- self.log_msg(logstr + 'failed')
+ logger.error(XFRIN_AXFR_DATABASE_FAILURE, self._zone_name, str(e))
ret = XFRIN_FAIL
except UserWarning as e:
# XXX: this is an exception from our C++ library via the
# Boost.Python binding. It would be better to have more more
# specific exceptions, but at this moment this is the finest
# granularity.
- self.log_msg(e)
- self.log_msg(logstr + 'failed')
+ logger.error(XFRIN_AXFR_INTERNAL_FAILURE, self._zone_name, str(e))
ret = XFRIN_FAIL
finally:
self.close()
@@ -395,11 +392,6 @@ class XfrinConnection(asyncore.dispatcher):
# Overwrite the log function, log nothing
pass
- def log_msg(self, msg):
- if self._verbose:
- sys.stdout.write('[b10-xfrin] %s\n' % str(msg))
-
-
def process_xfrin(server, xfrin_recorder, zone_name, rrclass, db_file,
shutdown_event, master_addrinfo, check_soa, verbose,
tsig_key):
@@ -481,8 +473,8 @@ class ZoneInfo:
try:
self.master_addr = isc.net.parse.addr_parse(master_addr_str)
except ValueError:
+ logger.error(XFRIN_BAD_MASTER_ADDR_FORMAT, master_addr_str)
errmsg = "bad format for zone's master: " + master_addr_str
- log_error(errmsg)
raise XfrinZoneInfoException(errmsg)
def set_master_port(self, master_port_str):
@@ -496,8 +488,8 @@ class ZoneInfo:
try:
self.master_port = isc.net.parse.port_parse(master_port_str)
except ValueError:
+ logger.error(XFRIN_BAD_MASTER_PORT_FORMAT, master_port_str)
errmsg = "bad format for zone's master port: " + master_port_str
- log_error(errmsg)
raise XfrinZoneInfoException(errmsg)
def set_zone_class(self, zone_class_str):
@@ -514,8 +506,8 @@ class ZoneInfo:
try:
self.rrclass = RRClass(zone_class_str)
except InvalidRRClass:
+ logger.error(XFRIN_BAD_ZONE_CLASS, zone_class_str)
errmsg = "invalid zone class: " + zone_class_str
- log_error(errmsg)
raise XfrinZoneInfoException(errmsg)
def set_tsig_key(self, tsig_key_str):
@@ -529,8 +521,8 @@ class ZoneInfo:
try:
self.tsig_key = TSIGKey(tsig_key_str)
except InvalidParameter as ipe:
+ logger.error(XFRIN_BAD_TSIG_KEY_STRING, tsig_key_str)
errmsg = "bad TSIG key string: " + tsig_key_str
- log_error(errmsg)
raise XfrinZoneInfoException(errmsg)
def get_master_addr_info(self):
@@ -556,7 +548,8 @@ class Xfrin:
self._send_cc_session = isc.cc.Session()
self._module_cc = isc.config.ModuleCCSession(SPECFILE_LOCATION,
self.config_handler,
- self.command_handler)
+ self.command_handler,
+ None, True)
self._module_cc.start()
config_data = self._module_cc.get_full_config()
self.config_handler(config_data)
@@ -635,7 +628,7 @@ class Xfrin:
if zone_info is None:
# TODO what to do? no info known about zone. defaults?
errmsg = "Got notification to retransfer unknown zone " + zone_name.to_text()
- log_error(errmsg)
+ logger.error(XFRIN_RETRANSFER_UNKNOWN_ZONE, zone_name.to_text())
answer = create_answer(1, errmsg)
else:
master_addr = zone_info.get_master_addr_info()
@@ -670,7 +663,7 @@ class Xfrin:
else:
answer = create_answer(1, 'unknown command: ' + command)
except XfrinException as err:
- log_error('error happened for command: %s, %s' % (command, str(err)) )
+ logger.error(XFRIN_COMMAND_ERROR, command, str(err))
answer = create_answer(1, str(err))
return answer
@@ -762,8 +755,7 @@ class Xfrin:
except isc.cc.session.SessionTimeout:
pass # for now we just ignore the failure
except socket.error as err:
- log_error("Fail to send message to %s and %s, msgq may has been killed"
- % (XFROUT_MODULE_NAME, ZONE_MANAGER_MODULE_NAME))
+ logger.error(XFRIN_MSGQ_SEND_ERROR, XFROUT_MODULE_NAME, ZONE_MANAGER_MODULE_NAME)
else:
msg = create_command(ZONE_XFRIN_FAILED, param)
# catch the exception, in case msgq has been killed.
@@ -775,8 +767,7 @@ class Xfrin:
except isc.cc.session.SessionTimeout:
pass # for now we just ignore the failure
except socket.error as err:
- log_error("Fail to send message to %s, msgq may has been killed"
- % ZONE_MANAGER_MODULE_NAME)
+ logger.error(XFRIN_MSGQ_SEND_ERROR_ZONE_MANAGER, ZONE_MANAGER_MODULE_NAME)
def startup(self):
while not self._shutdown_event.is_set():
@@ -844,12 +835,11 @@ def main(xfrin_class, use_signal = True):
xfrind = xfrin_class(verbose = options.verbose)
xfrind.startup()
except KeyboardInterrupt:
- log_error("exit b10-xfrin")
+ logger.info(XFRIN_STOPPED_BY_KEYBOARD)
except isc.cc.session.SessionError as e:
- log_error(str(e))
- log_error('Error happened! is the command channel daemon running?')
+ logger.error(XFRIN_CC_SESSION_ERROR, str(e))
except Exception as e:
- log_error(str(e))
+ logger.error(XFRIN_UNKNOWN_ERROR, str(e))
if xfrind:
xfrind.shutdown()
diff --git a/src/bin/xfrin/xfrin_messages.mes b/src/bin/xfrin/xfrin_messages.mes
new file mode 100644
index 0000000..80a0be3
--- /dev/null
+++ b/src/bin/xfrin/xfrin_messages.mes
@@ -0,0 +1,91 @@
+# Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+# No namespace declaration - these constants go in the global namespace
+# of the xfrin messages python module.
+
+% XFRIN_AXFR_INTERNAL_FAILURE AXFR transfer of zone %1 failed: %2
+The AXFR transfer for the given zone has failed due to an internal
+problem in the bind10 python wrapper library.
+The error is shown in the log message.
+
+% XFRIN_AXFR_DATABASE_FAILURE AXFR transfer of zone %1 failed: %2
+The AXFR transfer for the given zone has failed due to a database problem.
+The error is shown in the log message.
+
+% XFRIN_AXFR_TRANSFER_FAILURE AXFR transfer of zone %1 failed: %2
+The AXFR transfer for the given zone has failed due to a protocol error.
+The error is shown in the log message.
+
+% XFRIN_AXFR_TRANSFER_STARTED AXFR transfer of zone %1 started
+A connection to the master server has been made, the serial value in
+the SOA record has been checked, and a zone transfer has been started.
+
+% XFRIN_AXFR_TRANSFER_SUCCESS AXFR transfer of zone %1 succeeded
+The AXFR transfer of the given zone was successfully completed.
+
+% XFRIN_BAD_MASTER_ADDR_FORMAT bad format for master address: %1
+The given master address is not a valid IP address.
+
+% XFRIN_BAD_MASTER_PORT_FORMAT bad format for master port: %1
+The master port as read from the configuration is not a valid port number.
+
+% XFRIN_BAD_TSIG_KEY_STRING bad TSIG key string: %1
+The TSIG key string as read from the configuration does not represent
+a valid TSIG key.
+
+% XFRIN_BAD_ZONE_CLASS Invalid zone class: %1
+The zone class as read from the configuration is not a valid DNS class.
+
+% XFRIN_CC_SESSION_ERROR error reading from cc channel: %1
+There was a problem reading from the command and control channel. The
+most likely cause is that the msgq daemon is not running.
+
+% XFRIN_COMMAND_ERROR error while executing command '%1': %2
+There was an error while the given command was being processed. The
+error is given in the log message.
+
+% XFRIN_CONNECT_MASTER error connecting to master at %1: %2
+There was an error opening a connection to the master. The error is
+shown in the log message.
+
+% XFRIN_MSGQ_SEND_ERROR error while contacting %1 and %2
+There was a problem sending a message to the xfrout module or the
+zone manager. This most likely means that the msgq daemon has quit or
+was killed.
+
+% XFRIN_MSGQ_SEND_ERROR_ZONE_MANAGER error while contacting %1
+There was a problem sending a message to the zone manager. This most
+likely means that the msgq daemon has quit or was killed.
+
+% XFRIN_IMPORT_DNS error importing python DNS module: %1
+There was an error importing the python DNS module pydnspp. The most
+likely cause is a PYTHONPATH problem.
+
+% XFRIN_RETRANSFER_UNKNOWN_ZONE got notification to retransfer unknown zone %1
+There was an internal command to retransfer the given zone, but the
+zone is not known to the system. This may indicate that the configuration
+for xfrin is incomplete, or there was a typographical error in the
+zone name in the configuration.
+
+% XFRIN_STARTING starting xfrin with command line '%1'
+An informational message, this is output when the xfrin daemon starts up.
+
+% XFRIN_STOPPED_BY_KEYBOARD keyboard interrupt, shutting down
+There was a keyboard interrupt signal to stop the xfrin daemon. The
+daemon will now shut down.
+
+% XFRIN_UNKNOWN_ERROR unknown error: %1
+An uncaught exception was raised while running the xfrin daemon. The
+exception message is printed in the log message.
diff --git a/src/bin/xfrout/Makefile.am b/src/bin/xfrout/Makefile.am
index 82d7652..c5492ad 100644
--- a/src/bin/xfrout/Makefile.am
+++ b/src/bin/xfrout/Makefile.am
@@ -6,11 +6,12 @@ pkglibexec_SCRIPTS = b10-xfrout
b10_xfroutdir = $(pkgdatadir)
b10_xfrout_DATA = xfrout.spec
+pyexec_DATA = xfrout_messages.py
-CLEANFILES= b10-xfrout xfrout.pyc xfrout.spec
+CLEANFILES= b10-xfrout xfrout.pyc xfrout.spec xfrout_messages.py xfrout_messages.pyc
man_MANS = b10-xfrout.8
-EXTRA_DIST = $(man_MANS) b10-xfrout.xml
+EXTRA_DIST = $(man_MANS) b10-xfrout.xml xfrout_messages.mes
if ENABLE_MAN
@@ -19,12 +20,15 @@ b10-xfrout.8: b10-xfrout.xml
endif
+# Define rule to build logging source files from message file
+xfrout_messages.py: xfrout_messages.mes
+ $(top_builddir)/src/lib/log/compiler/message -p $(top_srcdir)/src/bin/xfrout/xfrout_messages.mes
xfrout.spec: xfrout.spec.pre
$(SED) -e "s|@@LOCALSTATEDIR@@|$(localstatedir)|" xfrout.spec.pre >$@
# this is done here since configure.ac AC_OUTPUT doesn't expand exec_prefix
-b10-xfrout: xfrout.py
+b10-xfrout: xfrout.py xfrout_messages.py
$(SED) -e "s|@@PYTHONPATH@@|@pyexecdir@|" \
-e "s|@@LOCALSTATEDIR@@|$(localstatedir)|" xfrout.py >$@
chmod a+x $@
diff --git a/src/bin/xfrout/xfrout.py.in b/src/bin/xfrout/xfrout.py.in
index ac22fe4..a75ff22 100755
--- a/src/bin/xfrout/xfrout.py.in
+++ b/src/bin/xfrout/xfrout.py.in
@@ -26,7 +26,6 @@ from isc.datasrc import sqlite3_ds
from socketserver import *
import os
from isc.config.ccsession import *
-#from isc.log.log import *
from isc.cc import SessionError, SessionTimeout
from isc.notify import notify_out
import isc.util.process
@@ -36,13 +35,18 @@ import errno
from optparse import OptionParser, OptionValueError
from isc.util import socketserver_mixin
+from xfrout_messages import *
+
+isc.log.init("b10-xfrout")
+logger = isc.log.Logger("xfrout")
+
try:
from libutil_io_python import *
from pydnspp import *
except ImportError as e:
# C++ loadable module may not be installed; even so the xfrout process
# must keep running, so we warn about it and move forward.
- sys.stderr.write('[b10-xfrout] failed to import DNS or isc.util.io module: %s\n' % str(e))
+    logger.error(XFROUT_IMPORT, str(e))
isc.util.process.rename()
@@ -110,7 +114,7 @@ class XfroutSession():
self.dns_xfrout_start(self._sock_fd, self._request_data)
#TODO, avoid catching all exceptions
except Exception as e:
- #self._log.log_message("error", str(e))
+ logger.error(XFROUT_HANDLE_QUERY_ERROR, str(e))
pass
os.close(self._sock_fd)
@@ -138,7 +142,7 @@ class XfroutSession():
rcode = self._check_request_tsig(msg, mdata)
except Exception as err:
- #self._log.log_message("error", str(err))
+ logger.error(XFROUT_PARSE_QUERY_ERROR, str(err))
return Rcode.FORMERR(), None
return rcode, msg
@@ -147,6 +151,9 @@ class XfroutSession():
question = msg.get_question()[0]
return question.get_name().to_text()
+ def _get_query_zone_class(self, msg):
+ question = msg.get_question()[0]
+ return question.get_class().to_text()
def _send_data(self, sock_fd, data):
size = len(data)
@@ -243,19 +250,23 @@ class XfroutSession():
return self._reply_query_with_format_error(msg, sock_fd)
zone_name = self._get_query_zone_name(msg)
+ zone_class_str = self._get_query_zone_class(msg)
+ # TODO: should we not also include class in the check?
rcode_ = self._check_xfrout_available(zone_name)
+
if rcode_ != Rcode.NOERROR():
- #self._log.log_message("info", "transfer of '%s/IN' failed: %s",
- # zone_name, rcode_.to_text())
+ logger.info(XFROUT_AXFR_TRANSFER_FAILED, zone_name,
+ zone_class_str, rcode_.to_text())
return self. _reply_query_with_error_rcode(msg, sock_fd, rcode_)
try:
- #self._log.log_message("info", "transfer of '%s/IN': AXFR started" % zone_name)
+ logger.info(XFROUT_AXFR_TRANSFER_STARTED, zone_name, zone_class_str)
self._reply_xfrout_query(msg, sock_fd, zone_name)
- #self._log.log_message("info", "transfer of '%s/IN': AXFR end" % zone_name)
except Exception as err:
- #self._log.log_message("error", str(err))
+ logger.error(XFROUT_AXFR_TRANSFER_ERROR, zone_name,
+ zone_class_str, str(err))
pass
+ logger.info(XFROUT_AXFR_TRANSFER_DONE, zone_name, zone_class_str)
self._server.decrease_transfers_counter()
return
@@ -319,7 +330,7 @@ class XfroutSession():
for rr_data in sqlite3_ds.get_zone_datas(zone_name, self._server.get_db_file()):
if self._server._shutdown_event.is_set(): # Check if xfrout is shutdown
- #self._log.log_message("info", "xfrout process is being shutdown")
+ logger.info(XFROUT_STOPPING)
return
# TODO: RRType.SOA() ?
if RRType(rr_data[5]) == RRType("SOA"): #ignore soa record
@@ -396,7 +407,7 @@ class UnixSockServer(socketserver_mixin.NoPollMixIn, ThreadingUnixStreamServer):
try:
request, client_address = self.get_request()
except socket.error:
- #self._log.log_message("error", "Failed to fetch request")
+ logger.error(XFROUT_FETCH_REQUEST_ERROR)
return
# Check self._shutdown_event to ensure the real shutdown comes.
@@ -410,7 +421,7 @@ class UnixSockServer(socketserver_mixin.NoPollMixIn, ThreadingUnixStreamServer):
(rlist, wlist, xlist) = ([], [], [])
continue
else:
- #self._log.log_message("error", "Error with select(): %s" %e)
+ logger.error(XFROUT_SOCKET_SELECT_ERROR, str(e))
break
# self.server._shutdown_event will be set by now, if it is not a false
@@ -420,9 +431,8 @@ class UnixSockServer(socketserver_mixin.NoPollMixIn, ThreadingUnixStreamServer):
try:
self.process_request(request)
- except:
- #self._log.log_message("error", "Exception happened during processing of %s"
- # % str(client_address))
+ except Exception as pre:
+            logger.error(XFROUT_PROCESS_REQUEST_ERROR, str(pre))
break
def _handle_request_noblock(self):
@@ -440,8 +450,8 @@ class UnixSockServer(socketserver_mixin.NoPollMixIn, ThreadingUnixStreamServer):
# This may happen when one xfrout process try to connect to
# xfrout unix socket server, to check whether there is another
# xfrout running.
- #if sock_fd == FD_COMM_ERROR:
- #self._log.log_message("error", "Failed to receive the file descriptor for XFR connection")
+ if sock_fd == FD_COMM_ERROR:
+ logger.error(XFROUT_RECEIVE_FILE_DESCRIPTOR_ERROR)
return
# receive request msg
@@ -466,8 +476,7 @@ class UnixSockServer(socketserver_mixin.NoPollMixIn, ThreadingUnixStreamServer):
If it's not a socket file or nobody is listening
, it will be removed. If it can't be removed, exit from python. '''
if self._sock_file_in_use(sock_file):
- #self._log.log_message("error", "Fail to start xfrout process, unix socket file '%s'"
- # " is being used by another xfrout process\n" % sock_file)
+ logger.error(XFROUT_UNIX_SOCKET_FILE_IN_USE, sock_file)
sys.exit(0)
else:
if not os.path.exists(sock_file):
@@ -476,7 +485,7 @@ class UnixSockServer(socketserver_mixin.NoPollMixIn, ThreadingUnixStreamServer):
try:
os.unlink(sock_file)
except OSError as err:
- #self._log.log_message("error", "[b10-xfrout] Fail to remove file %s: %s\n" % (sock_file, err))
+ logger.error(XFROUT_REMOVE_OLD_UNIX_SOCKET_FILE_ERROR, sock_file, str(err))
sys.exit(0)
def _sock_file_in_use(self, sock_file):
@@ -497,18 +506,17 @@ class UnixSockServer(socketserver_mixin.NoPollMixIn, ThreadingUnixStreamServer):
try:
os.unlink(self._sock_file)
except Exception as e:
- #self._log.log_message('error', str(e))
+ logger.error(XFROUT_REMOVE_UNIX_SOCKET_FILE_ERROR, self._sock_file, str(e))
pass
def update_config_data(self, new_config):
'''Apply the new config setting of xfrout module. '''
- #self._log.log_message('info', 'update config data start.')
+ logger.info(XFROUT_NEW_CONFIG)
self._lock.acquire()
self._max_transfers_out = new_config.get('transfers_out')
self.set_tsig_key_ring(new_config.get('tsig_key_ring'))
- #self._log.log_message('info', 'max transfer out : %d', self._max_transfers_out)
self._lock.release()
- #self._log.log_message('info', 'update config data complete.')
+ logger.info(XFROUT_NEW_CONFIG_DONE)
def set_tsig_key_ring(self, key_list):
"""Set the tsig_key_ring , given a TSIG key string list representation. """
@@ -523,8 +531,7 @@ class UnixSockServer(socketserver_mixin.NoPollMixIn, ThreadingUnixStreamServer):
try:
self.tsig_key_ring.add(TSIGKey(key_item))
except InvalidParameter as ipe:
- errmsg = "bad TSIG key string: " + str(key_item)
- #self._log.log_message('error', '%s' % errmsg)
+ logger.error(XFROUT_BAD_TSIG_KEY_STRING, str(key_item))
def get_db_file(self):
file, is_default = self._cc.get_remote_config_value("Auth", "database_file")
@@ -624,7 +631,7 @@ class XfroutServer:
def command_handler(self, cmd, args):
if cmd == "shutdown":
- #self._log.log_message("info", "Received shutdown command.")
+ logger.info(XFROUT_RECEIVED_SHUTDOWN_COMMAND)
self.shutdown()
answer = create_answer(0)
@@ -632,8 +639,7 @@ class XfroutServer:
zone_name = args.get('zone_name')
zone_class = args.get('zone_class')
if zone_name and zone_class:
- #self._log.log_message("info", "zone '%s/%s': receive notify others command" \
- # % (zone_name, zone_class))
+ logger.info(XFROUT_NOTIFY_COMMAND, zone_name, zone_class)
self.send_notify(zone_name, zone_class)
answer = create_answer(0)
else:
@@ -676,15 +682,11 @@ if '__main__' == __name__:
xfrout_server = XfroutServer()
xfrout_server.run()
except KeyboardInterrupt:
- sys.stderr.write("[b10-xfrout] exit xfrout process\n")
+        logger.info(XFROUT_STOPPED_BY_KEYBOARD)
except SessionError as e:
- sys.stderr.write("[b10-xfrout] Error creating xfrout, "
- "is the command channel daemon running?\n")
+ logger.error(XFROUT_CC_SESSION_ERROR, str(e))
except SessionTimeout as e:
- sys.stderr.write("[b10-xfrout] Error creating xfrout, "
- "is the configuration manager running?\n")
- except ModuleCCSessionError as e:
- sys.stderr.write("[b10-xfrout] exit xfrout process:%s\n" % str(e))
+ logger.error(XFROUT_CC_SESSION_TIMEOUT_ERROR)
if xfrout_server:
xfrout_server.shutdown()
diff --git a/src/bin/xfrout/xfrout_messages.mes b/src/bin/xfrout/xfrout_messages.mes
new file mode 100644
index 0000000..2dada54
--- /dev/null
+++ b/src/bin/xfrout/xfrout_messages.mes
@@ -0,0 +1,140 @@
+# Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+# No namespace declaration - these constants go in the global namespace
+# of the xfrout messages python module.
+
+% XFROUT_AXFR_TRANSFER_DONE transfer of %1/%2 complete
+The transfer of the given zone has been completed successfully, or was
+aborted due to a shutdown event.
+
+% XFROUT_AXFR_TRANSFER_ERROR error transferring zone %1/%2: %3
+An uncaught exception was encountered while sending the response to
+an AXFR query. The error message of the exception is included in the
+log message, but this error most likely points to incomplete exception
+handling in the code.
+
+% XFROUT_AXFR_TRANSFER_FAILED transfer of %1/%2 failed, rcode: %3
+A transfer out for the given zone failed. An error response is sent
+to the client. The given rcode is the rcode that is set in the error
+response. This is either NOTAUTH (we are not authoritative for the
+zone), SERVFAIL (our internal database is missing the SOA record for
+the zone), or REFUSED (the limit of simultaneous outgoing AXFR
+transfers, as specified by the configuration value
+Xfrout/max_transfers_out, has been reached).
+# Still a TODO, but when implemented, REFUSED can also mean
+# the client is not allowed to transfer the zone
+
+% XFROUT_AXFR_TRANSFER_STARTED transfer of zone %1/%2 has started
+A transfer out of the given zone has started.
+
+% XFROUT_BAD_TSIG_KEY_STRING bad TSIG key string: %1
+The TSIG key string as read from the configuration does not represent
+a valid TSIG key.
+
+% XFROUT_CC_SESSION_ERROR error reading from cc channel: %1
+There was a problem reading from the command and control channel. The
+most likely cause is that the msgq daemon is not running.
+
+% XFROUT_CC_SESSION_TIMEOUT_ERROR timeout waiting for cc response
+There was a problem reading a response from another module over the
+command and control channel. The most likely cause is that the
+configuration manager b10-cfgmgr is not running.
+
+% XFROUT_FETCH_REQUEST_ERROR socket error while fetching a request from the auth daemon
+There was a socket error while contacting the b10-auth daemon to
+fetch a transfer request. The auth daemon may have shutdown.
+
+% XFROUT_HANDLE_QUERY_ERROR error while handling query: %1
+There was a general error handling an xfrout query. The error is shown
+in the message. In principle this error should not appear, and points
+to an oversight catching exceptions in the right place. However, to
+ensure the daemon keeps running, this error is caught and reported.
+
+% XFROUT_IMPORT error importing python module: %1
+There was an error importing a python module. One of the modules needed
+by xfrout could not be found. This suggests that either some libraries
+are missing on the system, or the PYTHONPATH variable is not correct.
+The specific place where this library needs to be depends on your
+system and your specific installation.
+
+% XFROUT_NEW_CONFIG Update xfrout configuration
+New configuration settings have been sent from the configuration
+manager. The xfrout daemon will now apply them.
+
+% XFROUT_NEW_CONFIG_DONE Update xfrout configuration done
+The xfrout daemon is now done reading the new configuration settings
+received from the configuration manager.
+
+% XFROUT_NOTIFY_COMMAND received command to send notifies for %1/%2
+The xfrout daemon received a command on the command channel that
+NOTIFY packets should be sent for the given zone.
+
+% XFROUT_PARSE_QUERY_ERROR error parsing query: %1
+There was a parse error while reading an incoming query. The parse
+error is shown in the log message. A remote client sent a packet we
+do not understand or support. The xfrout request will be ignored.
+In general, this should only occur for unexpected problems like
+memory allocation failures, as the query should already have been
+parsed by the b10-auth daemon, before it was passed here.
+
+% XFROUT_PROCESS_REQUEST_ERROR error processing transfer request: %1
+There was an error processing a transfer request. The error is included
+in the log message, but at this point no specific information other
+than that could be given. This points to incomplete exception handling
+in the code.
+
+% XFROUT_RECEIVE_FILE_DESCRIPTOR_ERROR error receiving the file descriptor for an XFR connection
+There was an error receiving the file descriptor for the transfer
+request. Normally, the request is received by b10-auth, and passed on
+to the xfrout daemon, so it can answer directly. However, there was a
+problem receiving this file descriptor. The request will be ignored.
+
+% XFROUT_RECEIVED_SHUTDOWN_COMMAND shutdown command received
+The xfrout daemon received a shutdown command from the command channel
+and will now shut down.
+
+% XFROUT_REMOVE_UNIX_SOCKET_FILE_ERROR error clearing unix socket file %1: %2
+When shutting down, the xfrout daemon tried to clear the unix socket
+file used for communication with the auth daemon. It failed to remove
+the file. The reason for the failure is given in the error message.
+
+% XFROUT_REMOVE_OLD_UNIX_SOCKET_FILE_ERROR error removing unix socket file %1: %2
+The unix socket file xfrout needs for contact with the auth daemon
+already exists, and needs to be removed first, but there is a problem
+removing it. It is likely that we do not have permission to remove
+this file. The specific error is shown in the log message. The xfrout
+daemon will shut down.
+
+% XFROUT_SOCKET_SELECT_ERROR error while calling select() on request socket: %1
+There was an error while calling select() on the socket that informs
+the xfrout daemon that a new xfrout request has arrived. This should
+be the result of a rare local error such as memory allocation failure and
+shouldn't happen under normal conditions. The error is included in the
+log message.
+
+% XFROUT_STOPPED_BY_KEYBOARD keyboard interrupt, shutting down
+There was a keyboard interrupt signal to stop the xfrout daemon. The
+daemon will now shut down.
+
+% XFROUT_STOPPING the xfrout daemon is shutting down
+The current transfer is aborted, as the xfrout daemon is shutting down.
+
+% XFROUT_UNIX_SOCKET_FILE_IN_USE another xfrout process seems to be using the unix socket file %1
+While starting up, the xfrout daemon tried to clear the unix domain
+socket needed for contacting the b10-auth daemon to pass requests
+on, but the file is in use. The most likely cause is that another
+xfrout daemon process is still running. This xfrout daemon (the one
+printing this message) will not start.
+
diff --git a/src/lib/Makefile.am b/src/lib/Makefile.am
index c8827e2..f4bef6b 100644
--- a/src/lib/Makefile.am
+++ b/src/lib/Makefile.am
@@ -1,3 +1,3 @@
SUBDIRS = exceptions util log cryptolink dns cc config python xfr \
bench asiolink asiodns nsas cache resolve testutils datasrc \
- server_common acl
+ acl server_common
diff --git a/src/lib/acl/Makefile.am b/src/lib/acl/Makefile.am
index d3222ae..f211025 100644
--- a/src/lib/acl/Makefile.am
+++ b/src/lib/acl/Makefile.am
@@ -2,14 +2,26 @@ SUBDIRS = . tests
AM_CPPFLAGS = -I$(top_srcdir)/src/lib -I$(top_builddir)/src/lib
AM_CPPFLAGS += $(BOOST_INCLUDES)
-
AM_CXXFLAGS = $(B10_CXXFLAGS)
+# The core library
lib_LTLIBRARIES = libacl.la
-libacl_la_SOURCES = check.h acl.h
+libacl_la_SOURCES = acl.h
+libacl_la_SOURCES += check.h
+libacl_la_SOURCES += ip_check.h ip_check.cc
+libacl_la_SOURCES += logic_check.h
libacl_la_SOURCES += loader.h loader.cc
libacl_la_LIBADD = $(top_builddir)/src/lib/exceptions/libexceptions.la
libacl_la_LIBADD += $(top_builddir)/src/lib/cc/libcc.la
+libacl_la_LIBADD += $(top_builddir)/src/lib/util/libutil.la
+
+# The DNS-specialized library
+lib_LTLIBRARIES += libdnsacl.la
+
+libdnsacl_la_SOURCES = dns.h dns.cc
+
+libdnsacl_la_LIBADD = libacl.la
+libdnsacl_la_LIBADD += $(top_builddir)/src/lib/dns/libdns++.la
CLEANFILES = *.gcno *.gcda
diff --git a/src/lib/acl/dns.cc b/src/lib/acl/dns.cc
new file mode 100644
index 0000000..16f1bf5
--- /dev/null
+++ b/src/lib/acl/dns.cc
@@ -0,0 +1,34 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include "dns.h"
+
+namespace isc {
+namespace acl {
+namespace dns {
+
+Loader&
+getLoader() {
+ static Loader* loader(NULL);
+ if (loader == NULL) {
+ loader = new Loader(REJECT);
+ // TODO: This is the place where we register default check creators
+ // like IP check, etc, once we have them.
+ }
+ return (*loader);
+}
+
+}
+}
+}
diff --git a/src/lib/acl/dns.h b/src/lib/acl/dns.h
new file mode 100644
index 0000000..6f36e51
--- /dev/null
+++ b/src/lib/acl/dns.h
@@ -0,0 +1,89 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef ACL_DNS_H
+#define ACL_DNS_H
+
+#include "loader.h"
+
+#include <asiolink/io_address.h>
+#include <dns/message.h>
+
+namespace isc {
+namespace acl {
+namespace dns {
+
+/**
+ * \brief DNS request to be checked.
+ *
+ * This plays the role of Context of the generic template ACLs (in namespace
+ * isc::acl).
+ *
+ * It is a simple structure holding just a bunch of information. Therefore
+ * the member names don't end with an underscore; there are no methods, so
+ * they can't be confused with local variables.
+ *
+ * \todo Do we want a constructor to set this in a shorter manner, so we
+ * can call the ACLs directly?
+ */
+struct RequestContext {
+ /// \brief The DNS message (payload).
+ isc::dns::ConstMessagePtr message;
+ /// \brief The remote IP address (e.g. the client's).
+ asiolink::IOAddress remote_address;
+ /// \brief The local IP address (ours; the address of the receiving interface).
+ asiolink::IOAddress local_address;
+ /// \brief The remote port.
+ uint16_t remote_port;
+ /// \brief The local port.
+ uint16_t local_port;
+ /**
+ * \brief Name of the TSIG key the message is signed with.
+ *
+ * This will be either the name of the TSIG key the message is signed with,
+ * or an empty string if the message is not signed. We could extract the
+ * information from the message itself, but by the time the ACL is checked
+ * the signature has already been verified, so passing the name around is
+ * probably cheaper.
+ *
+ * It is expected that messages with invalid signatures are handled before
+ * the ACL is applied.
+ */
+ std::string tsig_key_name;
+};
+
+/// \brief DNS based check.
+typedef acl::Check<RequestContext> Check;
+/// \brief DNS based compound check.
+typedef acl::CompoundCheck<RequestContext> CompoundCheck;
+/// \brief DNS based ACL.
+typedef acl::ACL<RequestContext> ACL;
+/// \brief DNS based ACL loader.
+typedef acl::Loader<RequestContext> Loader;
+
+/**
+ * \brief Loader singleton access function.
+ *
+ * This function returns a loader of ACLs. Applications are expected to use
+ * it instead of creating their own loaders, because one loader is enough:
+ * this one has the default checks registered and is globally known, so any
+ * plugins can register additional checks with it as well.
+ */
+Loader& getLoader();
+
+}
+}
+}
+
+#endif
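
As an illustration of how this new interface is meant to be used, here is a minimal sketch (not part of this change; the helper function is hypothetical and only calls interfaces declared above):

#include <acl/dns.h>
#include <cc/data.h>

#include <boost/shared_ptr.hpp>
#include <string>

// Hypothetical helper: build a DNS ACL from a JSON configuration string
// via the shared loader. No default checks are registered yet in this
// commit, so only entries consisting of a bare "action" will load.
boost::shared_ptr<isc::acl::dns::ACL>
createACL(const std::string& json_acl) {
    return (isc::acl::dns::getLoader().load(
                isc::data::Element::fromJSON(json_acl)));
}

// e.g. createACL("[{\"action\": \"DROP\"}]"), mirroring the new unit test.

Evaluating the resulting ACL against a RequestContext, filled from the transport endpoint and the verified TSIG key name, is then up to the caller.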
diff --git a/src/lib/acl/ip_check.cc b/src/lib/acl/ip_check.cc
new file mode 100644
index 0000000..76aacca
--- /dev/null
+++ b/src/lib/acl/ip_check.cc
@@ -0,0 +1,141 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <sys/socket.h>
+
+#include <exceptions/exceptions.h>
+
+#include <boost/lexical_cast.hpp>
+
+#include <acl/ip_check.h>
+
+using namespace std;
+using namespace isc;
+
+namespace isc {
+namespace acl {
+namespace internal {
+
+uint8_t
+createMask(size_t prefixlen) {
+
+ if (prefixlen == 0) {
+ return (0);
+
+ } else if (prefixlen <= 8) {
+
+ // In the following discussion:
+ //
+ // w is the width of the data type in bits.
+ // m is the value of prefixlen, the number of most significant bits we
+ // want to set.
+ // ** is exponentiation (i.e. 2**n is 2 raised to the power of n).
+ //
+ // We note that the value of 2**m - 1 gives a value with the least
+ // significant m bits set. For a data type of width w, this means that
+ // the most significant (w-m) bits are clear.
+ //
+ // Hence the value 2**(w-m) - 1 gives a result with the least significant
+ // w-m bits set and the most significant m bits clear. The 1's
+ // complement of this value gives the result we want.
+ //
+ // Final note: at this point in the logic, m is non-zero, so w-m < w.
+ // This means 1<<(w-m) will fit into a variable of width w bits. In
+ // other words, in the expression below, no term will cause an integer
+ // overflow.
+ return (~((1 << (8 - prefixlen)) - 1));
+ }
+
+ // Mask size is too large. (Note that prefixlen is unsigned, so can't be
+ // negative.)
+ isc_throw(isc::OutOfRange, "prefixlen argument must be between 0 and 8");
+}
+
+pair<string, int>
+splitIPAddress(const string& ipprefix) {
+
+ // Split string into its components - an address and a prefix length.
+ // We initialize by assuming that there is no slash in the string given.
+ string address = ipprefix;
+ string prefixlen = "";
+
+ const size_t slashpos = ipprefix.find('/');
+ if ((ipprefix.size() == 0) || (slashpos == 0) ||
+ (slashpos == (ipprefix.size() - 1))) {
+ // Nothing in prefix, or it starts with or ends with a slash.
+ isc_throw(isc::InvalidParameter, "address prefix of " << ipprefix <<
+ " is not valid");
+
+ } else if (slashpos != string::npos) {
+ // There is a slash somewhere in the string, split the string on it.
+ // Don't worry about multiple slashes - if there are some, they will
+ // appear in the prefixlen segment and will be detected when an attempt
+ // is made to convert it to a number.
+ address = ipprefix.substr(0, slashpos);
+ prefixlen = ipprefix.substr(slashpos + 1);
+ }
+
+ // Set the default value for the prefix length. As the type of the address
+ // is not known at the point this function is called, the maximum
+ // allowable value is also not known. The value of 0 is reserved for
+ // a "match any address" match.
+ int prefix_size = -1;
+
+ // If there is a prefixlength, attempt to convert it.
+ if (!prefixlen.empty()) {
+ try {
+ prefix_size = boost::lexical_cast<int>(prefixlen);
+ if (prefix_size < 0) {
+ isc_throw(isc::InvalidParameter, "address prefix of " <<
+ ipprefix << " is not valid");
+ }
+ } catch (boost::bad_lexical_cast&) {
+ isc_throw(isc::InvalidParameter, "prefix length of '" <<
+ prefixlen << "' is not valid");
+ }
+ }
+
+ return (make_pair(address, prefix_size));
+}
+} // namespace internal
+
+namespace {
+const uint8_t*
+getSockAddrData(const struct sockaddr& sa) {
+ const void* sa_ptr = &sa;
+ const void* data_ptr;
+ if (sa.sa_family == AF_INET) {
+ const struct sockaddr_in* sin =
+ static_cast<const struct sockaddr_in*>(sa_ptr);
+ data_ptr = &sin->sin_addr;
+ } else if (sa.sa_family == AF_INET6) {
+ const struct sockaddr_in6* sin6 =
+ static_cast<const struct sockaddr_in6*>(sa_ptr);
+ data_ptr = &sin6->sin6_addr;
+ } else {
+ isc_throw(BadValue, "Unsupported address family for IPAddress: " <<
+ static_cast<int>(sa.sa_family));
+ }
+ return (static_cast<const uint8_t*>(data_ptr));
+}
+}
+
+IPAddress::IPAddress(const struct sockaddr& sa) :
+ family(sa.sa_family),
+ data(getSockAddrData(sa)),
+ length(family == AF_INET ?
+ sizeof(struct in_addr) : sizeof(struct in6_addr))
+{}
+} // namespace acl
+} // namespace isc
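
A quick sketch of what the two helper functions above compute (not part of the change; it exercises them the same way the new unit tests further down do):

#include <acl/ip_check.h>

#include <cassert>
#include <string>

int main() {
    using namespace isc::acl::internal;

    // prefixlen 3: 2**(8-3) - 1 = 0x1f, and its complement 0xe0 is the byte
    // with the three most significant bits set (binary 11100000).
    assert(createMask(3) == 0xe0);

    // The "/n" part, when present, comes back as the second member of the
    // pair; when it is absent, -1 is returned instead.
    assert(splitIPAddress("192.0.2.0/24").first == "192.0.2.0");
    assert(splitIPAddress("192.0.2.0/24").second == 24);
    assert(splitIPAddress("192.0.2.0").second == -1);

    return (0);
}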
diff --git a/src/lib/acl/ip_check.h b/src/lib/acl/ip_check.h
new file mode 100644
index 0000000..794b943
--- /dev/null
+++ b/src/lib/acl/ip_check.h
@@ -0,0 +1,417 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __IP_CHECK_H
+#define __IP_CHECK_H
+
+#include <sys/socket.h>
+
+#include <algorithm>
+#include <cassert>
+#include <functional>
+#include <vector>
+
+#include <boost/static_assert.hpp>
+
+#include <stdint.h>
+#include <arpa/inet.h>
+#include <sys/socket.h> // for AF_INET/AF_INET6
+#include <netinet/in.h>
+
+#include <acl/check.h>
+#include <exceptions/exceptions.h>
+#include <util/strutil.h>
+
+namespace isc {
+namespace acl {
+
+// Free functions. These are not supposed to be used outside this module,
+// but are declared public for testing. To try to conceal them, they are
+// put in an "internal" namespace.
+
+namespace internal {
+
+/// \brief Convert prefix length to mask
+///
+/// Given a prefix length and a data type, return a value of that data type
+/// with the most significant "prefix length" bits set. For example, if the
+/// data type is an uint8_t and the prefix length is 3, the function would
+/// return a uint8_t holding the binary value 11100000. This value is used as
+/// a mask in the address checks.
+///
+/// \param prefixlen number of bits to be set in the mask. This must be
+/// between 0 and 8.
+///
+/// \return uint8_t with the most significant "prefixlen" bits set.
+///
+/// \exception OutOfRange prefixlen is too large for the data type.
+
+uint8_t createMask(size_t prefixlen);
+
+/// \brief Split IP Address Prefix
+///
+/// Splits an IP address prefix (given in the form "xxxxxx/n" or "xxxxx") into
+/// a string representing the IP address and a number giving the length of the
+/// prefix. (In the latter case, the prefix is taken to be equal in length to
+/// the width in bits of the data type holding the address.) An exception is
+/// thrown if the string format is invalid or if the prefix length is invalid.
+///
+/// N.B. This function does NOT check that the address component is a valid IP
+/// address; this is done elsewhere in the address parsing process.
+///
+/// \param ipprefix Address or address prefix. The string should be passed
+/// without leading or trailing spaces.
+///
+/// \return Pair of (string, int) holding the address string and the prefix
+/// length. The second element is -1 if no prefix was given.
+///
+/// \exception InvalidParameter Address prefix not of the expected syntax
+
+std::pair<std::string, int>
+splitIPAddress(const std::string& ipprefix);
+
+} // namespace internal
+
+/// \brief A simple representation of IP address.
+///
+/// This structure provides an address-family-independent interface to an
+/// IP (v4 or v6) address, so that the application can perform
+/// \c IPCheck::matches without knowing which version of address it is
+/// handling. (For example, consider the standard socket API: it uses
+/// the generic \c sockaddr structure to represent endpoints).
+///
+/// An object of this class could be constructed from various types of
+/// sources, but in the initial implementation there's only one constructor,
+/// which takes a \c sockaddr structure. For efficiency the \c IPAddress
+/// object only retains a reference to the necessary part of \c sockaddr.
+/// Therefore the corresponding \c sockaddr instance must be valid while the
+/// \c IPAddress object is used.
+///
+/// This class is copyable so that a fixed object can be easily reused for
+/// different addresses. To ensure internal integrity, specific member
+/// variables are kept private and only accessible via read-only accessor
+/// methods. This ensures, for example, that if \c getFamily() returns
+/// \c AF_INET6, \c getLength() always returns 16.
+///
+/// All accessor methods are straightforward and exception free.
+///
+/// In future, we may introduce the default constructor to further improve
+/// reusability.
+struct IPAddress {
+ /// The constructor from socket address structure.
+ ///
+ /// This constructor sets up the internal data based on the actual type
+ /// of \c sa. For example, if \c sa.sa_family is \c AF_INET, it assumes
+ /// \c sa actually refers to a \c sockaddr_in structure.
+ /// The behavior when this assumption isn't held is undefined.
+ ///
+ /// \param sa A reference to the socket address structure from which the
+ /// \c IPAddress is to be constructed.
+ explicit IPAddress(const struct sockaddr& sa);
+
+ /// Return the address family of the address
+ ///
+ /// It's AF_INET for IPv4 and AF_INET6 for IPv6.
+ int getFamily() const { return (family); }
+
+ /// Return the binary representation of the address in network byte order.
+ ///
+ /// Only the \c getLength() bytes from the returned pointer are ensured
+ /// to be valid. In addition, if the \c sockaddr structure given on
+ /// construction was dynamically allocated, the data is valid only until
+ /// the \c sockaddr is invalidated.
+ const uint8_t* getData() const { return (data); }
+
+ /// Return the length of the address.
+ size_t getLength() const { return (length); }
+private:
+ int family;
+ const uint8_t* data;
+ size_t length;
+};
+
+/// \brief IP Check
+///
+/// This class performs a match between an IP address prefix specified in an ACL
+/// and a given IP address. The check works for both IPv4 and IPv6 addresses.
+///
+/// The class is templated on the type of a context structure passed to the
+/// matches() method, and a template specialisation for that method must be
+/// supplied for the class to be used.
+
+template <typename Context>
+class IPCheck : public Check<Context> {
+private:
+ // Size of uint8_t array needed to hold different address types
+ static const size_t IPV6_SIZE = sizeof(struct in6_addr);
+ static const size_t IPV4_SIZE = sizeof(struct in_addr);
+
+ // Confirm our assumption of relative sizes - this allows us to assume that
+ // an array sized for an IPv6 address can hold an IPv4 address.
+ BOOST_STATIC_ASSERT(sizeof(struct in6_addr) > sizeof(struct in_addr));
+
+public:
+ /// \brief String Constructor
+ ///
+ /// Constructs an IP Check object from an address or address prefix in the
+ /// form "<ip-address>/n".
+ ///
+ /// Also allowed are the special keywords "any4" and "any6", which match
+ /// any IPv4 or IPv6 address. These must be specified in lowercase.
+ ///
+ /// \param ipprefix IP address prefix in the form "<ip-address>/n"
+ /// (where the "/n" part is optional and should be valid for the
+ /// address). If "n" is specified as zero, the match is for any
+ /// address in that address family. The address can also be
+ /// given as "any4" or "any6".
+ IPCheck(const std::string& ipprefix) : family_(0) {
+
+ // Ensure array elements are correctly initialized with zeroes.
+ std::fill(address_, address_ + IPV6_SIZE, 0);
+ std::fill(mask_, mask_ + IPV6_SIZE, 0);
+
+ // Only deal with the string after we've removed leading and trailing
+ // spaces.
+ const std::string mod_prefix = isc::util::str::trim(ipprefix);
+
+ // Check for special cases first.
+ if (mod_prefix == "any4") {
+ family_ = AF_INET;
+
+ } else if (mod_prefix == "any6") {
+ family_ = AF_INET6;
+
+ } else {
+
+ // General address prefix. Split into address part and prefix
+ // length.
+ const std::pair<std::string, int> result =
+ internal::splitIPAddress(mod_prefix);
+
+ // Try to convert the address. If successful, the result is in
+ // network-byte order (most significant components at lower
+ // addresses).
+ int status = inet_pton(AF_INET6, result.first.c_str(), address_);
+ if (status == 1) {
+ // It was an IPv6 address.
+ family_ = AF_INET6;
+ } else {
+ // IPv6 interpretation failed, try IPv4.
+ status = inet_pton(AF_INET, result.first.c_str(), address_);
+ if (status == 1) {
+ family_ = AF_INET;
+ }
+ }
+
+ // Handle errors.
+ if (status == 0) {
+ isc_throw(isc::InvalidParameter, "address prefix of " <<
+ ipprefix << " is not valid");
+ } else if (status < 0) {
+ isc_throw(isc::Unexpected, "address conversion of " <<
+ ipprefix << " failed due to a system error");
+ }
+
+ // All done, so set the mask used in the address comparison.
+ setMask(result.second);
+ }
+ }
+
+ /// \brief Destructor
+ virtual ~IPCheck() {}
+
+ /// \brief The check itself
+ ///
+ /// Matches the passed argument to the condition stored here. Different
+ /// specialisations must be provided for different argument types, and the
+ /// program will fail to compile if a required specialisation is not
+ /// provided.
+ ///
+ /// It is expected that matches() will extract the address information from
+ /// the Context structure, and use compare() to actually perform the
+ /// comparison.
+ ///
+ /// \param context Information to be matched
+ virtual bool matches(const Context& context) const;
+
+ /// \brief Estimated cost
+ ///
+ /// Assume that the cost of the match is linear and depends on the
+ /// maximum number of comparison operations.
+ ///
+ /// \return Estimated cost of the comparison
+ virtual unsigned cost() const {
+ return ((family_ == AF_INET) ? IPV4_SIZE : IPV6_SIZE);
+ }
+
+ ///@{
+ /// Access methods - mainly for testing
+
+ /// \return Stored IP address
+ std::vector<uint8_t> getAddress() const {
+ const size_t vector_len = (family_ == AF_INET ? IPV4_SIZE : IPV6_SIZE);
+ return (std::vector<uint8_t>(address_, address_ + vector_len));
+ }
+
+ /// \return Network mask applied to match
+ std::vector<uint8_t> getMask() const {
+ const size_t vector_len = (family_ == AF_INET ? IPV4_SIZE : IPV6_SIZE);
+ return (std::vector<uint8_t>(mask_, mask_ + vector_len));
+ }
+
+ /// \return Prefix length of the match
+ size_t getPrefixlen() const {
+ // Work this out by counting bits in the mask.
+ size_t count = 0;
+ for (size_t i = 0; i < IPV6_SIZE; ++i) {
+ if (mask_[i] == 0xff) {
+ // All bits set in this byte
+ count += 8;
+ continue;
+
+ } else if (mask_[i] != 0) {
+ // Only some bits set in this byte. Count them.
+ uint8_t byte = mask_[i];
+ for (int j = 0; j < 8; ++j) {
+ count += byte & 0x01; // Add one if the bit is set
+ byte >>= 1; // Go for next bit
+ }
+ }
+ break;
+ }
+ return (count);
+ }
+
+ /// \return Address family
+ int getFamily() const {
+ return (family_);
+ }
+ ///@}
+
+protected:
+ /// \brief Comparison
+ ///
+ /// This is the actual comparison function that checks the IP address passed
+ /// to this class with the matching information in the class itself. It is
+ /// expected to be called from matches().
+ ///
+ /// \param testaddr Address (in network byte order) to test against the
+ /// check condition in the class. This is expected to
+ /// be IPV6_SIZE or IPV4_SIZE bytes long.
+ /// \param family Address family of testaddr.
+ ///
+ /// \return true if the address matches, false if it does not.
+ virtual bool compare(const uint8_t* testaddr, int family) const {
+
+ if (family != family_) {
+ // Can't match if the address is of the wrong family
+ return (false);
+ }
+
+ // The simple family check has passed, so we do a complete match. To check that
+ // the address given matches the stored network address and mask, we
+ // check the simple condition that:
+ //
+ // address_given & mask_ == stored_address & mask_
+ //
+ // The result is checked for all bytes for which there are bits set in
+ // the mask. We stop at the first non-match (or when we run out of bits
+ // in the mask).
+ //
+ // Note that the mask represents a contiguous set of bits. As such, as
+ // soon as we find a mask byte of zeroes, we have run past the part of
+ // the address where we need to match.
+ //
+ // Note also that when checking an IPv4 address, the constructor has
+ // set all bytes in the mask beyond the first four bytes to zero.
+ // As the loop stops when it encounters a zero mask byte, if the
+ // ACL is for an IPV4 address, the loop will never check more than four
+ // bytes.
+
+ bool match = true;
+ for (int i = 0; match && (i < IPV6_SIZE) && (mask_[i] != 0); ++i) {
+ match = ((testaddr[i] & mask_[i]) == (address_[i] & mask_[i]));
+ }
+ return (match);
+ }
+
+private:
+ /// \brief Set Mask
+ ///
+ /// Sets up the mask from the prefix length. This involves setting
+ /// an individual mask in each byte of the mask array.
+ ///
+ /// The actual allowed value of the prefix length depends on the address
+ /// family.
+ ///
+ /// \param requested Requested prefix length size. If negative, the
+ /// maximum for the address family is assumed. (A negative value
+ /// will arise if the string constructor was used and no mask size
+ /// was given.)
+ void setMask(int requested) {
+
+ // Set the maximum number of bits allowed in the mask, and request
+ // that number of bits if no prefix length was given in the constructor.
+ const int maxmask = 8 * ((family_ == AF_INET) ? IPV4_SIZE : IPV6_SIZE);
+ if (requested < 0) {
+ requested = maxmask;
+ }
+
+ // Check that the requested mask size is valid for this address family.
+ if (requested <= maxmask) {
+
+ // Loop, setting the bits in the set of mask bytes until all the
+ // specified bits have been used up. As both IPv4 and IPv6
+ // addresses are stored in network-byte order, this works in
+ // both cases.
+ size_t bits_left = requested; // Bits remaining to set
+ int i = -1;
+ while (bits_left > 0) {
+ if (bits_left >= 8) {
+ mask_[++i] = ~0; // All bits set
+ bits_left -= 8;
+
+ } else if (bits_left > 0) {
+ mask_[++i] = internal::createMask(bits_left);
+ bits_left = 0;
+ }
+ }
+ } else {
+ isc_throw(isc::OutOfRange,
+ "mask size of " << requested << " is invalid " <<
+ "for the given address family");
+ }
+ }
+
+ // Member variables.
+ uint8_t address_[IPV6_SIZE]; ///< Address in binary form
+ uint8_t mask_[IPV6_SIZE]; ///< Address mask
+ int family_; ///< Address family
+};
+
+// Some compilers seem to need this to be explicitly defined outside the class
+template <typename Context>
+const size_t IPCheck<Context>::IPV6_SIZE;
+
+template <typename Context>
+const size_t IPCheck<Context>::IPV4_SIZE;
+
+} // namespace acl
+} // namespace isc
+
+#endif // __IP_CHECK_H
+
+// Local Variables:
+// mode: c++
+// End:
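
For reference, the specialisation requirement mentioned in the IPCheck documentation looks like this in practice (a sketch only; ClientContext is a made-up placeholder, and the same pattern is used by the unit tests added below):

#include <acl/ip_check.h>

// Placeholder context type an application could pass to matches().
struct ClientContext {
    const uint8_t* addr;    // binary address, network byte order
    int family;             // AF_INET or AF_INET6
};

namespace isc {
namespace acl {
// matches() has no generic definition; each context type supplies a
// specialisation that extracts the address and forwards it to the
// protected compare() helper.
template <>
bool IPCheck<ClientContext>::matches(const ClientContext& ctx) const {
    return (compare(ctx.addr, ctx.family));
}
} // namespace acl
} // namespace isc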
diff --git a/src/lib/acl/loader.h b/src/lib/acl/loader.h
index 11e7ebc..c3400cb 100644
--- a/src/lib/acl/loader.h
+++ b/src/lib/acl/loader.h
@@ -24,6 +24,10 @@
namespace isc {
namespace acl {
+class AnyOfSpec;
+class AllOfSpec;
+template<typename Mode, typename Context> class LogicOperator;
+
/**
* \brief Exception for bad ACL specifications.
*
@@ -263,7 +267,7 @@ public:
* \param description The JSON description of the check.
*/
boost::shared_ptr<Check<Context> > loadCheck(const data::ConstElementPtr&
- description)
+ description) const
{
// Get the description as a map
typedef std::map<std::string, data::ConstElementPtr> Map;
@@ -290,7 +294,7 @@ public:
* \param description The JSON list of ACL.
*/
boost::shared_ptr<ACL<Context, Action> > load(const data::ConstElementPtr&
- description)
+ description) const
{
// We first check it's a list, so we can use the list reference
// (the list may be huge)
@@ -346,7 +350,7 @@ private:
* the map.
*/
boost::shared_ptr<Check<Context> > loadCheck(const data::ConstElementPtr&
- description, Map& map)
+ description, Map& map) const
{
// Remove the action keyword
map.erase("action");
@@ -367,18 +371,45 @@ private:
}
if (creatorIt->second->allowListAbbreviation() &&
checkDesc->second->getType() == data::Element::list) {
- isc_throw_1(LoaderError,
- "Not implemented (OR-abbreviated form)",
- checkDesc->second);
+ // Or-abbreviated form - create an OR and put everything
+ // inside.
+ const std::vector<data::ConstElementPtr>&
+ params(checkDesc->second->listValue());
+ boost::shared_ptr<LogicOperator<AnyOfSpec, Context> >
+ oper(new LogicOperator<AnyOfSpec, Context>);
+ for (std::vector<data::ConstElementPtr>::const_iterator
+ i(params.begin());
+ i != params.end(); ++i) {
+ oper->addSubexpression(
+ creatorIt->second->create(name, *i, *this));
+ }
+ return (oper);
}
// Create the check and return it
return (creatorIt->second->create(name, checkDesc->second,
*this));
}
- default:
- isc_throw_1(LoaderError,
- "Not implemented (AND-abbreviated form)",
- description);
+ default: {
+ // This is the AND-abbreviated form. We need to create an
+ // AND (or "ALL") operator, loop through the whole map and
+ // fill it in. We do a small trick - we create a bunch of
+ // single-item maps, call this loader recursively (therefore
+ // it will get into the "case 1" branch, where there is
+ // the actual loading) and use the results to fill the map.
+ //
+ // We keep the description the same, there's nothing we could
+ // take out (we could create a new one, but that would be
+ // confusing, as it is used for error messages only).
+ boost::shared_ptr<LogicOperator<AllOfSpec, Context> >
+ oper(new LogicOperator<AllOfSpec, Context>);
+ for (Map::const_iterator i(map.begin()); i != map.end(); ++i) {
+ Map singleSubexpr;
+ singleSubexpr.insert(*i);
+ oper->addSubexpression(loadCheck(description,
+ singleSubexpr));
+ }
+ return (oper);
+ }
}
}
/**
@@ -401,4 +432,17 @@ private:
}
}
+/*
+ * This include at the end of the file is unusual. But we need to include it,
+ * as we use template classes from there. However, they need to be present only
+ * at instantiation of our class, which will happen below this header.
+ *
+ * The problem is that the header uses us as well, so there's a circular
+ * dependency. If we included it at the beginning and someone included us
+ * first, the logic_check header wouldn't have our definitions. This way, no
+ * matter in which order they are included, the definitions from this header
+ * will be above the ones from logic_check.
+ */
+#include "logic_check.h"
+
#endif
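
To make the two new branches above more concrete (an illustration only; the "ip" and "key" check names are placeholders, since this commit does not register any creators yet): the OR-abbreviated form is a single check whose value is a list, so

    {"ip": ["192.0.2.1", "2001:db8::1"]}

is expanded into an AnyOfSpec operator holding one "ip" check per list element, i.e. the equivalent of

    {"ANY": [{"ip": "192.0.2.1"}, {"ip": "2001:db8::1"}]}

while the AND-abbreviated form is a map with several check names, so

    {"ip": "192.0.2.1", "key": "example.key"}

is expanded into an AllOfSpec operator with one check per map entry, i.e. the equivalent of

    {"ALL": [{"ip": "192.0.2.1"}, {"key": "example.key"}]}

(the map form is built directly as the operator rather than going through the "ALL" creator, but the result is equivalent).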
diff --git a/src/lib/acl/logic_check.h b/src/lib/acl/logic_check.h
new file mode 100644
index 0000000..6e1c567
--- /dev/null
+++ b/src/lib/acl/logic_check.h
@@ -0,0 +1,206 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef ACL_LOGIC_CHECK_H
+#define ACL_LOGIC_CHECK_H
+
+#include "check.h"
+#include "loader.h"
+
+namespace isc {
+namespace acl {
+
+/// \brief Constants for the AnyOf implementation
+class AnyOfSpec {
+public:
+ static bool start() { return (false); }
+ static bool terminate(const bool another) {
+ return (another);
+ }
+};
+
+/// \brief Constants for the AllOf implementation
+class AllOfSpec {
+public:
+ static bool start() { return (true); }
+ static bool terminate(const bool another) {
+ return (!another);
+ }
+};
+
+/**
+ * \brief Logic operators
+ *
+ * This class implements the AllOf and AnyOf compound checks. As their
+ * behaviour is almost the same, the same template class is used. Which
+ * one it is depends on the Mode template parameter. The Mode should be
+ * one of AnyOfSpec or AllOfSpec, which provide some commands for the
+ * internal implementation. It would be nice to provide typedefs for
+ * them, but it is impossible to do so, as we have the Context template
+ * parameter as well and C++ doesn't like templated typedefs.
+ *
+ * The object holds several subexpressions and returns true if all
+ * of the subexpressions return true (in case of AllOfSpec Mode) or
+ * if at least one of them returns true (in case of AnyOfSpec Mode). If
+ * some subexpression already determines the result (e.g. one returns
+ * false in case of AllOfSpec), the rest are not tried, for performance
+ * reasons.
+ */
+template<typename Mode, typename Context>
+class LogicOperator : public CompoundCheck<Context> {
+public:
+ /**
+ * \brief Add another subexpression.
+ *
+ * This adds another subexpression to the list of checked expressions.
+ * This is usually done shortly after the creation, before using the
+ * check for matches.
+ *
+ * Currently there's no way to place an expression at an arbitrary position
+ * or to remove one. It might turn out that this is needed in the future for
+ * optimisation, or it might even turn out that we need shared pointers here.
+ *
+ * \param expr The new expression to put inside.
+ */
+ void addSubexpression(const boost::shared_ptr<Check<Context> >& expr) {
+ checks_.push_back(expr);
+ }
+ /**
+ * \brief The current list of subexpressions.
+ */
+ virtual typename CompoundCheck<Context>::Checks getSubexpressions() const {
+ typename CompoundCheck<Context>::Checks result;
+ for (typename Checks::const_iterator i(checks_.begin());
+ i != checks_.end(); ++i) {
+ result.push_back(i->get());
+ }
+ return (result);
+ }
+ /**
+ * \brief The match of the check.
+ *
+ * Runs the subexpressions, one by one, and then decides based on that
+ * what to return.
+ */
+ virtual bool matches(const Context& context) const {
+ /*
+ * This might look slightly complicated. However, it is just a
+ * generalized version of multi-and or multi-or. The usual
+ * implementation of multi-and starts with true; if a false operand
+ * is found, the result becomes false and stays false, so false is
+ * returned. It is exactly the other way around with or.
+ *
+ * So, if we ever find an operand that differs from the starting value
+ * (false in case of and, true in case of or), we can just stop and
+ * return that value right away. If no such expression is found, we
+ * get to the end and return the default (the starting value).
+ */
+ for (typename Checks::const_iterator i(checks_.begin());
+ i != checks_.end(); ++i) {
+ if (Mode::terminate((*i)->matches(context))) {
+ return (!Mode::start());
+ }
+ }
+ return (Mode::start());
+ }
+private:
+ /// \brief List of subexpressions
+ typedef typename std::vector<boost::shared_ptr<Check<Context> > > Checks;
+ Checks checks_;
+};
+
+/**
+ * \brief Creator for the LogicOperator compound check.
+ *
+ * This class can load the ANY and ALL operators from JSON. They expect
+ * a list of subexpressions as a parameter, e.g. like this:
+ *
+ * \verbatim
+ * {"ANY": [
+ * {"ip": "1.2.3.4"},
+ * {"ip": "5.6.7.8"}
+ * ]}
+ * \endverbatim
+ *
+ * It uses the loader to load the subexpressions, therefore whatever is
+ * supported there is supported here as well.
+ *
+ * The Mode template parameter has the same meaning as with LogicOperator,
+ * it is used to know which operators to create.
+ */
+template<typename Mode, typename Context, typename Action = BasicAction>
+class LogicCreator : public Loader<Context, Action>::CheckCreator {
+public:
+ /**
+ * \brief Constructor.
+ *
+ * \param name The name for which the creator will work. In practice,
+ * it will usually be ANY or ALL (depending on the mode), but
+ * anything else can be used as well.
+ */
+ LogicCreator(const std::string& name) :
+ name_(name)
+ {}
+ /// \brief Returns vector containing the name.
+ virtual std::vector<std::string> names() const {
+ std::vector<std::string> result;
+ result.push_back(name_);
+ return (result);
+ }
+ /**
+ * \brief Converts a JSON description into the logic operator.
+ *
+ * This is the place where the actual loading happens. It creates
+ * the logic operator and calls the loader on each of the list
+ * elements, placing the result into the logic operator.
+ *
+ * The first parameter is ignored and is there only to match the interface.
+ *
+ * \param definition The JSON definition of the subexpressions. This must
+ * be a list (if it isn't, a LoaderError is thrown) and the elements
+ * must be loadable by the loader (the exceptions from it are not
+ * caught).
+ * \param loader The loader to use for loading of subexpressions.
+ */
+ virtual boost::shared_ptr<Check<Context> > create(const std::string&,
+ data::ConstElementPtr
+ definition,
+ const Loader<Context,
+ Action>& loader)
+ {
+ std::vector<data::ConstElementPtr> subexprs;
+ try {
+ subexprs = definition->listValue();
+ }
+ catch (const data::TypeError&) {
+ isc_throw_1(LoaderError, "Logic operator takes list", definition);
+ }
+ boost::shared_ptr<LogicOperator<Mode, Context> >
+ result(new LogicOperator<Mode, Context>);
+ for (std::vector<data::ConstElementPtr>::const_iterator
+ i(subexprs.begin());
+ i != subexprs.end(); ++i) {
+ result->addSubexpression(loader.loadCheck(*i));
+ }
+ return (result);
+ }
+ virtual bool allowListAbbreviation() const { return (false); }
+private:
+ const std::string name_;
+};
+
+}
+}
+
+#endif
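
A minimal sketch of the operator used on its own (not part of this change; Ctx and TrueCheck are placeholders, and only the interface defined above is used):

#include <acl/logic_check.h>

#include <boost/shared_ptr.hpp>

namespace {

struct Ctx {};  // placeholder context

// A check that always matches, used only to drive the operator.
class TrueCheck : public isc::acl::Check<Ctx> {
public:
    virtual bool matches(const Ctx&) const { return (true); }
};

}

int main() {
    using namespace isc::acl;

    LogicOperator<AllOfSpec, Ctx> all;
    all.addSubexpression(boost::shared_ptr<Check<Ctx> >(new TrueCheck));
    all.addSubexpression(boost::shared_ptr<Check<Ctx> >(new TrueCheck));

    // AllOfSpec starts with true and terminates on the first false
    // subexpression; with only matching subexpressions the result is true.
    return (all.matches(Ctx()) ? 0 : 1);
}

The ANY behaviour is obtained simply by instantiating the same template with AnyOfSpec instead.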
diff --git a/src/lib/acl/tests/Makefile.am b/src/lib/acl/tests/Makefile.am
index 6ceb2fa..03b08bb 100644
--- a/src/lib/acl/tests/Makefile.am
+++ b/src/lib/acl/tests/Makefile.am
@@ -1,20 +1,37 @@
AM_CPPFLAGS = -I$(top_builddir)/src/lib -I$(top_srcdir)/src/lib
AM_CPPFLAGS += $(BOOST_INCLUDES)
+AM_CXXFLAGS = $(B10_CXXFLAGS)
+
+if USE_STATIC_LINK
+AM_LDFLAGS = -static
+endif
+
+CLEANFILES = *.gcno *.gcda
TESTS =
if HAVE_GTEST
TESTS += run_unittests
run_unittests_SOURCES = run_unittests.cc
-run_unittests_SOURCES += check_test.cc acl_test.cc loader_test.cc
+run_unittests_SOURCES += acl_test.cc
+run_unittests_SOURCES += check_test.cc
+run_unittests_SOURCES += dns_test.cc
+run_unittests_SOURCES += ip_check_unittest.cc
+run_unittests_SOURCES += loader_test.cc
run_unittests_SOURCES += logcheck.h
+run_unittests_SOURCES += creators.h
+run_unittests_SOURCES += logic_check_test.cc
+
run_unittests_CPPFLAGS = $(AM_CPPFLAGS) $(GTEST_INCLUDES)
run_unittests_LDFLAGS = $(AM_LDFLAGS) $(GTEST_LDFLAGS)
run_unittests_LDADD = $(GTEST_LDADD)
run_unittests_LDADD += $(top_builddir)/src/lib/util/unittests/libutil_unittests.la
run_unittests_LDADD += $(top_builddir)/src/lib/acl/libacl.la
+run_unittests_LDADD += $(top_builddir)/src/lib/util/libutil.la
run_unittests_LDADD += $(top_builddir)/src/lib/cc/libcc.la
+run_unittests_LDADD += $(top_builddir)/src/lib/log/liblog.la
run_unittests_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
+run_unittests_LDADD += $(top_builddir)/src/lib/acl/libdnsacl.la
endif
noinst_PROGRAMS = $(TESTS)
diff --git a/src/lib/acl/tests/acl_test.cc b/src/lib/acl/tests/acl_test.cc
index 5829fe7..15ffef6 100644
--- a/src/lib/acl/tests/acl_test.cc
+++ b/src/lib/acl/tests/acl_test.cc
@@ -12,8 +12,14 @@
// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
// PERFORMANCE OF THIS SOFTWARE.
+#include <boost/shared_ptr.hpp>
+
#include "logcheck.h"
+using namespace isc::acl;
+using namespace isc::acl::tests;
+using boost::shared_ptr;
+
namespace {
// Test version of the Acl class. It adds few methods to examine the protected
@@ -39,7 +45,7 @@ public:
TestACL acl_;
Log log_;
size_t next_check_;
- shared_ptr<Check<Log> > getCheck(bool accepts) {
+ boost::shared_ptr<Check<Log> > getCheck(bool accepts) {
return (shared_ptr<Check<Log> >(new ConstCheck(accepts,
next_check_++)));
}
diff --git a/src/lib/acl/tests/creators.h b/src/lib/acl/tests/creators.h
new file mode 100644
index 0000000..584df71
--- /dev/null
+++ b/src/lib/acl/tests/creators.h
@@ -0,0 +1,158 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+// This is not a public header, but some code shared between tests
+// This one contains various creators to test the loader and other creators
+
+#ifndef CREATORS_H
+#define CREATORS_H
+
+#include "logcheck.h"
+
+#include <cc/data.h>
+#include <acl/loader.h>
+#include <string>
+
+namespace isc {
+namespace acl {
+namespace tests {
+
+// A check that doesn't check anything but remembers its own name
+// and data
+class NamedCheck : public Check<Log> {
+public:
+ NamedCheck(const std::string& name, isc::data::ConstElementPtr data) :
+ name_(name),
+ data_(data)
+ {}
+ virtual bool matches(const Log&) const { return (true); }
+ const std::string name_;
+ const isc::data::ConstElementPtr data_;
+};
+
+// The creator of NamedCheck
+class NamedCreator : public Loader<Log>::CheckCreator {
+public:
+ NamedCreator(const std::string& name, bool abbreviatedList = true) :
+ abbreviated_list_(abbreviatedList)
+ {
+ names_.push_back(name);
+ }
+ NamedCreator(const std::vector<std::string>& names) :
+ names_(names),
+ abbreviated_list_(true)
+ {}
+ std::vector<std::string> names() const {
+ return (names_);
+ }
+ boost::shared_ptr<Check<Log> > create(const std::string& name,
+ isc::data::ConstElementPtr data,
+ const Loader<Log>&)
+ {
+ bool found(false);
+ for (std::vector<std::string>::const_iterator i(names_.begin());
+ i != names_.end(); ++i) {
+ if (*i == name) {
+ found = true;
+ break;
+ }
+ }
+ EXPECT_TRUE(found) << "Name " << name << " passed to creator which "
+ "doesn't handle it.";
+ return (boost::shared_ptr<Check<Log> >(new NamedCheck(name, data)));
+ }
+ bool allowListAbbreviation() const {
+ return (abbreviated_list_);
+ }
+private:
+ std::vector<std::string> names_;
+ const bool abbreviated_list_;
+};
+
+// To be thrown in tests internally
+class TestCreatorError {};
+
+// This will throw every time it should create something
+class ThrowCreator : public Loader<Log>::CheckCreator {
+public:
+ std::vector<std::string> names() const {
+ std::vector<std::string> result;
+ result.push_back("throw");
+ return (result);
+ }
+ boost::shared_ptr<Check<Log> > create(const std::string&,
+ isc::data::ConstElementPtr,
+ const Loader<Log>&)
+ {
+ throw TestCreatorError();
+ }
+};
+
+// This throws whenever the match is called on it
+class ThrowCheck : public Check<Log> {
+public:
+ virtual bool matches(const Log&) const {
+ throw TestCreatorError();
+ }
+};
+
+// And creator for it
+class ThrowCheckCreator : public Loader<Log>::CheckCreator {
+public:
+ std::vector<std::string> names() const {
+ std::vector<std::string> result;
+ result.push_back("throwcheck");
+ return (result);
+ }
+ boost::shared_ptr<Check<Log> > create(const std::string&,
+ isc::data::ConstElementPtr,
+ const Loader<Log>&)
+ {
+ return (boost::shared_ptr<Check<Log> >(new ThrowCheck()));
+ }
+};
+
+class LogCreator : public Loader<Log>::CheckCreator {
+public:
+ std::vector<std::string> names() const {
+ std::vector<std::string> result;
+ result.push_back("logcheck");
+ return (result);
+ }
+ /*
+ * For simplicity, we just take two values as a list: the first is the
+ * logging cell used, the second is the result of the check. No error checking
+ * is done; if there's a bug in the test, it will throw TypeError for us.
+ */
+ boost::shared_ptr<Check<Log> > create(const std::string&,
+ isc::data::ConstElementPtr definition,
+ const Loader<Log>&)
+ {
+ std::vector<isc::data::ConstElementPtr> list(definition->listValue());
+ int logpos(list[0]->intValue());
+ bool accept(list[1]->boolValue());
+ return (boost::shared_ptr<ConstCheck>(new ConstCheck(accept, logpos)));
+ }
+ // We take a list, so don't interpret it for us
+ virtual bool allowListAbbreviation() const { return (false); }
+};
+
+}
+}
+}
+#endif
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/acl/tests/dns_test.cc b/src/lib/acl/tests/dns_test.cc
new file mode 100644
index 0000000..e5e0f3a
--- /dev/null
+++ b/src/lib/acl/tests/dns_test.cc
@@ -0,0 +1,35 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <acl/dns.h>
+#include <gtest/gtest.h>
+
+using namespace isc::acl::dns;
+
+namespace {
+
+// Tests that getLoader() actually returns something, returns the same thing
+// every time, and that the returned value can be used. It is not much of a
+// test, but then getLoader() is not much of a function.
+TEST(DNSACL, getLoader) {
+ Loader* l(&getLoader());
+ ASSERT_TRUE(l != NULL);
+ EXPECT_EQ(l, &getLoader());
+ EXPECT_NO_THROW(l->load(isc::data::Element::fromJSON(
+ "[{\"action\": \"DROP\"}]")));
+ // TODO Test that the things we should register by default, like IP based
+ // check, are loaded.
+}
+
+}
diff --git a/src/lib/acl/tests/ip_check_unittest.cc b/src/lib/acl/tests/ip_check_unittest.cc
new file mode 100644
index 0000000..fb24978
--- /dev/null
+++ b/src/lib/acl/tests/ip_check_unittest.cc
@@ -0,0 +1,640 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <netdb.h>
+#include <string.h>
+
+#include <gtest/gtest.h>
+#include <acl/ip_check.h>
+
+using namespace isc::acl;
+using namespace isc::acl::internal;
+using namespace std;
+
+namespace {
+const size_t IPV4_SIZE = 4;
+const size_t IPV6_SIZE = 16;
+
+// Simple struct holding either an IPV4 or IPV6 address. This is the "Context"
+// used for the tests.
+//
+// The structure is also used for converting an IPV4 address to a four-byte
+// array.
+struct GeneralAddress {
+ int family; // Family of the address
+ vector<uint8_t> addr; // Address type. Size indicates what it holds
+
+ // Convert uint32_t address in host-byte order to a uint8_t vector in
+ // network-byte order.
+ vector<uint8_t> convertUint32(uint32_t address) {
+ BOOST_STATIC_ASSERT(sizeof(uint32_t) == IPV4_SIZE);
+
+ vector<uint8_t> result(IPV4_SIZE);
+
+ // Copy the address to the array in network-byte order: the most
+ // significant byte of the value goes at the lowest index.
+ result[3] = address & 0xff;
+ result[2] = (address >> 8) & 0xff;
+ result[1] = (address >> 16) & 0xff;
+ result[0] = (address >> 24) & 0xff;
+
+ return (result);
+ }
+
+ // Convenience constructor for V4 address. As it is not marked as explicit,
+ // it allows the automatic promotion of a uint32_t to a GeneralAddress data
+ // type in calls to matches().
+ GeneralAddress(uint32_t address) : family(AF_INET), addr()
+ {
+ addr = convertUint32(address);
+ }
+
+ // Convenience constructor for V6 address. As it is not marked as explicit,
+ // it allows the automatic promotion of a vector<uint8_t> to a
+ // GeneralAddress data type in calls to matches().
+ GeneralAddress(const vector<uint8_t>& address) : family(AF_INET6),
+ addr(address)
+ {
+ if (address.size() != IPV6_SIZE) {
+ isc_throw(isc::InvalidParameter, "vector passed to GeneralAddress "
+ "constructor is " << address.size() << " bytes long - it "
+ "should be " << IPV6_SIZE << " bytes instead");
+ }
+ }
+
+ // A couple of convenience methods for checking equality with different
+ // representations of an address.
+
+ // Check that the IPV4 address is the same as that given.
+ bool equals(uint32_t address) {
+ if (family == AF_INET) {
+ const vector<uint8_t> byte_address = convertUint32(address);
+ return (equal(byte_address.begin(), byte_address.end(),
+ addr.begin()));
+ }
+ return (false);
+ }
+
+ // Check that the array is equal to that given.
+ bool equals(const vector<uint8_t>& byte_address) {
+ if (addr.size() == byte_address.size()) {
+ return (equal(byte_address.begin(), byte_address.end(),
+ addr.begin()));
+ }
+ return (false);
+ }
+};
+} // Unnamed namespace
+
+// Provide a specialisation of the IPCheck::matches() method for the
+// GeneralAddress class.
+
+namespace isc {
+namespace acl {
+template <>
+bool IPCheck<GeneralAddress>::matches(const GeneralAddress& address) const {
+ return (compare(&address.addr[0], address.family));
+}
+} // namespace acl
+} // namespace isc
+
+namespace {
+/// *** Free Function Tests ***
+
+// Test the createMask() function.
+TEST(IPFunctionCheck, CreateMask) {
+
+ // Invalid arguments should throw.
+ EXPECT_THROW(createMask(9), isc::OutOfRange);
+
+ // Check on all possible 8-bit values.
+ uint16_t expected = 0xff00;
+ for (size_t i = 0; i <= 8; ++i, expected >>= 1) {
+ EXPECT_EQ(static_cast<uint8_t>(expected & 0xff), createMask(i));
+ }
+}
+
+// Test the splitIPAddress() function.
+TEST(IPFunctionCheck, SplitIPAddress) {
+ pair<string, int> result;
+
+ result = splitIPAddress("192.0.2.1");
+ EXPECT_EQ(string("192.0.2.1"), result.first);
+ EXPECT_EQ(-1, result.second);
+
+ result = splitIPAddress("192.0.2.1/24");
+ EXPECT_EQ(string("192.0.2.1"), result.first);
+ EXPECT_EQ(24, result.second);
+
+ result = splitIPAddress("2001:db8::/128");
+ EXPECT_EQ(string("2001:db8::"), result.first);
+ EXPECT_EQ(128, result.second);
+
+ result = splitIPAddress("192.0.2.1/0");
+ EXPECT_EQ(string("192.0.2.1"), result.first);
+ EXPECT_EQ(0, result.second);
+
+ EXPECT_THROW(splitIPAddress("192.0.2.43/27 "), isc::InvalidParameter);
+ EXPECT_THROW(splitIPAddress("192.0.2.43/-1"), isc::InvalidParameter);
+ EXPECT_THROW(splitIPAddress("192.0.2.43//1"), isc::InvalidParameter);
+ EXPECT_THROW(splitIPAddress("192.0.2.43/1/"), isc::InvalidParameter);
+ EXPECT_THROW(splitIPAddress("/192.0.2.43/1"), isc::InvalidParameter);
+ EXPECT_THROW(splitIPAddress("2001:db8::/xxxx"), isc::InvalidParameter);
+ EXPECT_THROW(splitIPAddress("2001:db8::/32/s"), isc::InvalidParameter);
+ EXPECT_THROW(splitIPAddress("1/"), isc::InvalidParameter);
+ EXPECT_THROW(splitIPAddress("/1"), isc::InvalidParameter);
+ EXPECT_THROW(splitIPAddress(" 1/ "), isc::InvalidParameter);
+}
+
+const struct sockaddr&
+getSockAddr(const char* const addr) {
+ struct addrinfo hints, *res;
+ memset(&hints, 0, sizeof(hints));
+ hints.ai_family = AF_UNSPEC;
+ hints.ai_socktype = SOCK_STREAM;
+ hints.ai_flags = AI_NUMERICHOST;
+
+ if (getaddrinfo(addr, NULL, &hints, &res) == 0) {
+ static struct sockaddr_storage ss;
+ void* ss_ptr = &ss;
+ memcpy(ss_ptr, res->ai_addr, res->ai_addrlen);
+ freeaddrinfo(res);
+ return (*static_cast<struct sockaddr*>(ss_ptr));
+ }
+
+ // We don't expect getaddrinfo to fail for our tests. But if that
+ // ever happens we return a dummy value that would make the subsequent
+ // test fail.
+ static struct sockaddr sa_dummy;
+ sa_dummy.sa_family = AF_UNSPEC;
+ return (sa_dummy);
+}
+
+TEST(IPAddress, constructIPv4) {
+ IPAddress ipaddr(getSockAddr("192.0.2.1"));
+ const char expected_data[4] = { 192, 0, 2, 1 };
+ EXPECT_EQ(AF_INET, ipaddr.getFamily());
+ EXPECT_EQ(4, ipaddr.getLength());
+ EXPECT_EQ(0, memcmp(expected_data, ipaddr.getData(), 4));
+}
+
+TEST(IPAddress, constructIPv6) {
+ IPAddress ipaddr(getSockAddr("2001:db8:1234:abcd::53"));
+ const char expected_data[16] = { 0x20, 0x01, 0x0d, 0xb8, 0x12, 0x34, 0xab,
+ 0xcd, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x53 };
+ EXPECT_EQ(AF_INET6, ipaddr.getFamily());
+ EXPECT_EQ(16, ipaddr.getLength());
+ EXPECT_EQ(0, memcmp(expected_data, ipaddr.getData(), 16));
+}
+
+TEST(IPAddress, badConstruct) {
+ struct sockaddr sa;
+ sa.sa_family = AF_UNSPEC;
+ EXPECT_THROW(IPAddress ipaddr(sa), isc::BadValue);
+}
+
+// *** IPv4 Tests ***
+
+TEST(IPCheck, V4StringConstructor) {
+
+ // Constructor with no prefix length given (32 is assumed).
+ IPCheck<GeneralAddress> acl1("192.0.2.255");
+ EXPECT_EQ(32, acl1.getPrefixlen());
+ EXPECT_EQ(AF_INET, acl1.getFamily());
+
+ vector<uint8_t> stored1 = acl1.getAddress();
+ EXPECT_EQ(IPV4_SIZE, stored1.size());
+ GeneralAddress expected1(0xc00002ff);
+ EXPECT_TRUE(expected1.equals(stored1));
+
+ // Constructor with valid mask given
+ IPCheck<GeneralAddress> acl2("192.0.2.0/24");
+ EXPECT_EQ(24, acl2.getPrefixlen());
+ EXPECT_EQ(AF_INET, acl2.getFamily());
+
+ vector<uint8_t> stored2 = acl2.getAddress();
+ EXPECT_EQ(IPV4_SIZE, stored2.size());
+ GeneralAddress expected2(0xc0000200);
+ EXPECT_TRUE(expected2.equals(stored2));
+
+ // More valid masks
+ IPCheck<GeneralAddress> acl3("192.0.2.1/0");
+ EXPECT_EQ(0, acl3.getPrefixlen());
+ EXPECT_EQ(AF_INET, acl3.getFamily());
+
+ vector<uint8_t> stored3 = acl3.getAddress();
+ EXPECT_EQ(IPV4_SIZE, stored3.size());
+ GeneralAddress expected3(0xc0000201);
+ EXPECT_TRUE(expected3.equals(stored3));
+
+ IPCheck<GeneralAddress> acl4("192.0.2.2/32");
+ EXPECT_EQ(32, acl4.getPrefixlen());
+ EXPECT_EQ(AF_INET, acl4.getFamily());
+
+ vector<uint8_t> stored4 = acl4.getAddress();
+ EXPECT_EQ(IPV4_SIZE, stored4.size());
+ GeneralAddress expected4(0xc0000202);
+ EXPECT_TRUE(expected4.equals(stored4));
+
+ // Any match
+ IPCheck<GeneralAddress> acl5("any4");
+ EXPECT_EQ(0, acl5.getPrefixlen());
+ EXPECT_EQ(AF_INET, acl5.getFamily());
+
+ vector<uint8_t> stored5 = acl5.getAddress();
+ EXPECT_EQ(IPV4_SIZE, stored5.size());
+ GeneralAddress expected5(0);
+ EXPECT_TRUE(expected5.equals(stored5));
+
+ // Invalid prefix lengths
+ EXPECT_THROW(IPCheck<GeneralAddress>("192.0.2.0/33"), isc::OutOfRange);
+
+ // ... and invalid strings
+ EXPECT_THROW(IPCheck<GeneralAddress>("192.0.2.0/-1"),
+ isc::InvalidParameter);
+ EXPECT_THROW(IPCheck<GeneralAddress>("192.0.2.0/24/3"),
+ isc::InvalidParameter);
+ EXPECT_THROW(IPCheck<GeneralAddress>("192.0.2.0/ww"),
+ isc::InvalidParameter);
+ EXPECT_THROW(IPCheck<GeneralAddress>("aa.255.255.0/ww"),
+ isc::InvalidParameter);
+}
+
+TEST(IPCheck, V4CopyConstructor) {
+ IPCheck<GeneralAddress> acl1("192.0.2.1/24");
+ IPCheck<GeneralAddress> acl2(acl1);
+
+ EXPECT_EQ(acl1.getPrefixlen(), acl2.getPrefixlen());
+ EXPECT_EQ(acl1.getFamily(), acl2.getFamily());
+
+ vector<uint8_t> net1 = acl1.getMask();
+ vector<uint8_t> net2 = acl2.getMask();
+ EXPECT_EQ(net1.size(), net2.size());
+ EXPECT_TRUE(equal(net1.begin(), net1.end(), net2.begin()));
+
+ net1 = acl1.getAddress();
+ net2 = acl2.getAddress();
+ EXPECT_EQ(net1.size(), net2.size());
+ EXPECT_TRUE(equal(net1.begin(), net1.end(), net2.begin()));
+}
+
+TEST(IPCheck, V4AssignmentOperator) {
+ IPCheck<GeneralAddress> acl1("192.0.2.0/24");
+ IPCheck<GeneralAddress> acl2("192.0.2.128/25");
+ acl2 = acl1;
+
+ EXPECT_EQ(acl1.getPrefixlen(), acl2.getPrefixlen());
+ EXPECT_EQ(acl1.getFamily(), acl2.getFamily());
+
+ vector<uint8_t> net1 = acl1.getMask();
+ vector<uint8_t> net2 = acl2.getMask();
+ EXPECT_EQ(net1.size(), net2.size());
+ EXPECT_TRUE(equal(net1.begin(), net1.end(), net2.begin()));
+
+ net1 = acl1.getAddress();
+ net2 = acl2.getAddress();
+ EXPECT_EQ(net1.size(), net2.size());
+ EXPECT_TRUE(equal(net1.begin(), net1.end(), net2.begin()));
+}
+
+// Check that the comparison works - note that "matches" just calls the
+// internal compare() code. (Also note that the argument to matches() will be
+// automatically converted to the GeneralAddress data type used for the tests
+// because of its constructor taking a uint32_t argument.)
+
+TEST(IPCheck, V4Compare) {
+ // Exact address - match if given address matches stored address.
+ IPCheck<GeneralAddress> acl1("192.0.2.255/32");
+ EXPECT_TRUE(acl1.matches(0xc00002ff));
+ EXPECT_FALSE(acl1.matches(0xc00002fe));
+ EXPECT_FALSE(acl1.matches(0x13457f13));
+
+ IPCheck<GeneralAddress> acl2("192.0.2.255/27");
+ EXPECT_TRUE(acl2.matches(0xc00002ff));
+ EXPECT_TRUE(acl2.matches(0xc00002fe));
+ EXPECT_TRUE(acl2.matches(0xc00002ee));
+ EXPECT_FALSE(acl2.matches(0xc00002de));
+ EXPECT_FALSE(acl2.matches(0xd00002fe));
+ EXPECT_FALSE(acl2.matches(0x13457f13));
+
+ // Match if "any4" is specified
+ IPCheck<GeneralAddress> acl3("any4");
+ EXPECT_TRUE(acl3.matches(0xc00002ff));
+ EXPECT_TRUE(acl3.matches(0xc00002fe));
+ EXPECT_TRUE(acl3.matches(0xc00002ee));
+ EXPECT_TRUE(acl3.matches(0xc00002de));
+ EXPECT_TRUE(acl3.matches(0xd00002fe));
+ EXPECT_TRUE(acl3.matches(0x13457f13));
+
+ IPCheck<GeneralAddress> acl4("0.0.0.0/0");
+ EXPECT_TRUE(acl4.matches(0xc00002ff));
+ EXPECT_TRUE(acl4.matches(0xc00002fe));
+ EXPECT_TRUE(acl4.matches(0xc00002ee));
+ EXPECT_TRUE(acl4.matches(0xc00002de));
+ EXPECT_TRUE(acl4.matches(0xd00002fe));
+ EXPECT_TRUE(acl4.matches(0x13457f13));
+
+ IPCheck<GeneralAddress> acl5("192.0.2.255/0");
+ EXPECT_TRUE(acl5.matches(0xc00002ff));
+ EXPECT_TRUE(acl5.matches(0xc00002fe));
+ EXPECT_TRUE(acl5.matches(0xc00002ee));
+ EXPECT_TRUE(acl5.matches(0xc00002de));
+ EXPECT_TRUE(acl5.matches(0xd00002fe));
+ EXPECT_TRUE(acl5.matches(0x13457f13));
+}
+
+// *** IPV6 Tests ***
+
+// Some constants used in the tests
+
+const char* V6ADDR_1_STRING = "2001:0db8:1122:3344:5566:7788:99aa:bbcc";
+const uint8_t V6ADDR_1[] = {
+ 0x20, 0x01, 0x0d, 0xb8, 0x11, 0x22, 0x33, 0x44,
+ 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, 0xcc
+};
+
+const char* V6ADDR_2_STRING = "2001:0db8::dead:beef";
+const uint8_t V6ADDR_2[] = {
+ 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0xde, 0xad, 0xbe, 0xef
+};
+
+// Identical to V6ADDR_2 to 48 bits
+const uint8_t V6ADDR_2_48[] = {
+ 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x00, 0xff, 0x66,
+ 0x00, 0x00, 0x00, 0x00, 0xde, 0xad, 0xbe, 0xef
+};
+
+// Identical to V6ADDR_2 to 49 bits
+const uint8_t V6ADDR_2_49[] = {
+ 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x00, 0x7f, 0x66,
+ 0x00, 0x00, 0x00, 0x00, 0xde, 0xad, 0xbe, 0xef
+};
+
+// Identical to V6ADDR_2 to 50 bits
+const uint8_t V6ADDR_2_50[] = {
+ 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x00, 0x3f, 0x66,
+ 0x00, 0x00, 0x00, 0x00, 0xde, 0xad, 0xbe, 0xef
+};
+
+// Identical to V6ADDR_2 to 51 bits
+const uint8_t V6ADDR_2_51[] = {
+ 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x00, 0x1f, 0x66,
+ 0x00, 0x00, 0x00, 0x00, 0xde, 0xad, 0xbe, 0xef
+};
+
+// Identical to V6ADDR_2 to 52 bits
+const uint8_t V6ADDR_2_52[] = {
+ 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x00, 0x0f, 0x66,
+ 0x00, 0x00, 0x00, 0x00, 0xde, 0xad, 0xbe, 0xef
+};
+
+// Identical to V6ADDR_2 to 127 bits
+const uint8_t V6ADDR_2_127[] = {
+ 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0xde, 0xad, 0xbe, 0xee
+};
+
+const uint8_t V6ADDR_3[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01
+};
+
+const uint8_t V6ADDR_4[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+};
+
+TEST(IPCheck, V6StringConstructor) {
+ IPCheck<GeneralAddress> acl1(V6ADDR_1_STRING);
+ vector<uint8_t> address = acl1.getAddress();
+
+ EXPECT_EQ(128, acl1.getPrefixlen());
+ EXPECT_EQ(AF_INET6, acl1.getFamily());
+ EXPECT_EQ(IPV6_SIZE, address.size());
+ EXPECT_TRUE(equal(address.begin(), address.end(), V6ADDR_1));
+
+ IPCheck<GeneralAddress> acl2(string(V6ADDR_2_STRING) + string("/51"));
+ address = acl2.getAddress();
+ EXPECT_EQ(IPV6_SIZE, address.size());
+ EXPECT_EQ(51, acl2.getPrefixlen());
+ EXPECT_EQ(AF_INET6, acl2.getFamily());
+ EXPECT_TRUE(equal(address.begin(), address.end(), V6ADDR_2));
+
+ IPCheck<GeneralAddress> acl3(string(V6ADDR_2_STRING) + string("/127"));
+ address = acl3.getAddress();
+ EXPECT_EQ(IPV6_SIZE, address.size());
+ EXPECT_EQ(127, acl3.getPrefixlen());
+ EXPECT_EQ(AF_INET6, acl3.getFamily());
+ EXPECT_TRUE(equal(address.begin(), address.end(), V6ADDR_2));
+
+ IPCheck<GeneralAddress> acl4("::1");
+ address = acl4.getAddress();
+ EXPECT_EQ(IPV6_SIZE, address.size());
+ EXPECT_EQ(128, acl4.getPrefixlen());
+ EXPECT_EQ(AF_INET6, acl4.getFamily());
+ EXPECT_TRUE(equal(address.begin(), address.end(), V6ADDR_3));
+
+ // Any match. In these cases, the stored address should be all zeroes.
+ IPCheck<GeneralAddress> acl5("any6");
+ address = acl5.getAddress();
+ EXPECT_EQ(IPV6_SIZE, address.size());
+ EXPECT_EQ(0, acl5.getPrefixlen());
+ EXPECT_EQ(AF_INET6, acl5.getFamily());
+ EXPECT_TRUE(equal(address.begin(), address.end(), V6ADDR_4));
+
+ IPCheck<GeneralAddress> acl6("::/0");
+ address = acl6.getAddress();
+ EXPECT_EQ(0, acl6.getPrefixlen());
+ EXPECT_EQ(AF_INET6, acl6.getFamily());
+ EXPECT_TRUE(equal(address.begin(), address.end(), V6ADDR_4));
+
+ // Some invalid strings
+ EXPECT_THROW(IPCheck<GeneralAddress>("::1/129"), isc::OutOfRange);
+ EXPECT_THROW(IPCheck<GeneralAddress>("::1/24/3"), isc::InvalidParameter);
+ EXPECT_THROW(IPCheck<GeneralAddress>(":::1/24"), isc::InvalidParameter);
+ EXPECT_THROW(IPCheck<GeneralAddress>("2001:0db8::abcd/ww"),
+ isc::InvalidParameter);
+ EXPECT_THROW(IPCheck<GeneralAddress>("2xx1:0db8::abcd/32"),
+ isc::InvalidParameter);
+}
+
+TEST(IPCheck, V6CopyConstructor) {
+ IPCheck<GeneralAddress> acl1(string(V6ADDR_2_STRING) + string("/52"));
+ IPCheck<GeneralAddress> acl2(acl1);
+
+ vector<uint8_t> acl1_address = acl1.getAddress();
+ vector<uint8_t> acl2_address = acl2.getAddress();
+ EXPECT_EQ(sizeof(V6ADDR_1), acl1_address.size());
+ EXPECT_EQ(acl1_address.size(), acl2_address.size());
+ EXPECT_TRUE(equal(acl1_address.begin(), acl1_address.end(),
+ acl2_address.begin()));
+
+ EXPECT_EQ(acl1.getPrefixlen(), acl2.getPrefixlen());
+
+ vector<uint8_t> acl1_mask = acl1.getMask();
+ vector<uint8_t> acl2_mask = acl2.getMask();
+ EXPECT_EQ(sizeof(V6ADDR_1), acl1_mask.size());
+ EXPECT_EQ(acl1_mask.size(), acl2_mask.size());
+ EXPECT_TRUE(equal(acl1_mask.begin(), acl1_mask.end(),
+ acl2_mask.begin()));
+}
+
+TEST(IPCheck, V6AssignmentOperator) {
+ IPCheck<GeneralAddress> acl1(string(V6ADDR_2_STRING) + string("/52"));
+ IPCheck<GeneralAddress> acl2(string(V6ADDR_1_STRING) + string("/48"));
+
+ acl2 = acl1;
+
+ vector<uint8_t> acl1_address = acl1.getAddress();
+ vector<uint8_t> acl2_address = acl2.getAddress();
+ EXPECT_EQ(sizeof(V6ADDR_1), acl1_address.size());
+ EXPECT_EQ(acl1_address.size(), acl2_address.size());
+ EXPECT_TRUE(equal(acl1_address.begin(), acl1_address.end(),
+ acl2_address.begin()));
+
+ EXPECT_EQ(acl1.getPrefixlen(), acl2.getPrefixlen());
+
+ vector<uint8_t> acl1_mask = acl1.getMask();
+ vector<uint8_t> acl2_mask = acl2.getMask();
+ EXPECT_EQ(sizeof(V6ADDR_1), acl1_mask.size());
+ EXPECT_EQ(acl1_mask.size(), acl2_mask.size());
+ EXPECT_TRUE(equal(acl1_mask.begin(), acl1_mask.end(),
+ acl2_mask.begin()));
+}
+
+TEST(IPCheck, V6Compare) {
+ // Set up some data.
+ vector<uint8_t> v6addr_2(V6ADDR_2, V6ADDR_2 + IPV6_SIZE);
+ vector<uint8_t> v6addr_2_48(V6ADDR_2_48, V6ADDR_2_48 + IPV6_SIZE);
+ vector<uint8_t> v6addr_2_49(V6ADDR_2_49, V6ADDR_2_49 + IPV6_SIZE);
+ vector<uint8_t> v6addr_2_50(V6ADDR_2_50, V6ADDR_2_50 + IPV6_SIZE);
+ vector<uint8_t> v6addr_2_51(V6ADDR_2_51, V6ADDR_2_51 + IPV6_SIZE);
+ vector<uint8_t> v6addr_2_52(V6ADDR_2_52, V6ADDR_2_52 + IPV6_SIZE);
+ vector<uint8_t> v6addr_2_127(V6ADDR_2_127, V6ADDR_2_127 + IPV6_SIZE);
+ vector<uint8_t> v6addr_3(V6ADDR_3, V6ADDR_3 + IPV6_SIZE);
+
+ // Exact address - match if given address matches stored address.
+ IPCheck<GeneralAddress> acl1(string(V6ADDR_2_STRING) + string("/128"));
+ EXPECT_TRUE(acl1.matches(v6addr_2));
+ EXPECT_FALSE(acl1.matches(v6addr_2_127));
+ EXPECT_FALSE(acl1.matches(v6addr_2_52));
+ EXPECT_FALSE(acl1.matches(v6addr_2_51));
+ EXPECT_FALSE(acl1.matches(v6addr_2_50));
+ EXPECT_FALSE(acl1.matches(v6addr_2_49));
+ EXPECT_FALSE(acl1.matches(v6addr_2_48));
+ EXPECT_FALSE(acl1.matches(v6addr_3));
+
+ // Match to various prefixes.
+ IPCheck<GeneralAddress> acl2(string(V6ADDR_2_STRING) + string("/127"));
+ EXPECT_TRUE(acl2.matches(v6addr_2));
+ EXPECT_TRUE(acl2.matches(v6addr_2_127));
+ EXPECT_FALSE(acl2.matches(v6addr_2_52));
+ EXPECT_FALSE(acl2.matches(v6addr_2_51));
+ EXPECT_FALSE(acl2.matches(v6addr_2_50));
+ EXPECT_FALSE(acl2.matches(v6addr_2_49));
+ EXPECT_FALSE(acl2.matches(v6addr_2_48));
+ EXPECT_FALSE(acl2.matches(v6addr_3));
+
+ IPCheck<GeneralAddress> acl3(string(V6ADDR_2_STRING) + string("/52"));
+ EXPECT_TRUE(acl3.matches(v6addr_2));
+ EXPECT_TRUE(acl3.matches(v6addr_2_127));
+ EXPECT_TRUE(acl3.matches(v6addr_2_52));
+ EXPECT_FALSE(acl3.matches(v6addr_2_51));
+ EXPECT_FALSE(acl3.matches(v6addr_2_50));
+ EXPECT_FALSE(acl3.matches(v6addr_2_49));
+ EXPECT_FALSE(acl3.matches(v6addr_2_48));
+ EXPECT_FALSE(acl3.matches(v6addr_3));
+
+ IPCheck<GeneralAddress> acl4(string(V6ADDR_2_STRING) + string("/51"));
+ EXPECT_TRUE(acl4.matches(v6addr_2));
+ EXPECT_TRUE(acl4.matches(v6addr_2_127));
+ EXPECT_TRUE(acl4.matches(v6addr_2_52));
+ EXPECT_TRUE(acl4.matches(v6addr_2_51));
+ EXPECT_FALSE(acl4.matches(v6addr_2_50));
+ EXPECT_FALSE(acl4.matches(v6addr_2_49));
+ EXPECT_FALSE(acl4.matches(v6addr_2_48));
+ EXPECT_FALSE(acl4.matches(v6addr_3));
+
+ IPCheck<GeneralAddress> acl5(string(V6ADDR_2_STRING) + string("/50"));
+ EXPECT_TRUE(acl5.matches(v6addr_2));
+ EXPECT_TRUE(acl5.matches(v6addr_2_127));
+ EXPECT_TRUE(acl5.matches(v6addr_2_52));
+ EXPECT_TRUE(acl5.matches(v6addr_2_51));
+ EXPECT_TRUE(acl5.matches(v6addr_2_50));
+ EXPECT_FALSE(acl5.matches(v6addr_2_49));
+ EXPECT_FALSE(acl5.matches(v6addr_2_48));
+ EXPECT_FALSE(acl5.matches(v6addr_3));
+
+ IPCheck<GeneralAddress> acl6(string(V6ADDR_2_STRING) + string("/0"));
+ EXPECT_TRUE(acl6.matches(v6addr_2));
+ EXPECT_TRUE(acl6.matches(v6addr_2_127));
+ EXPECT_TRUE(acl6.matches(v6addr_2_52));
+ EXPECT_TRUE(acl6.matches(v6addr_2_51));
+ EXPECT_TRUE(acl6.matches(v6addr_2_50));
+ EXPECT_TRUE(acl6.matches(v6addr_2_49));
+ EXPECT_TRUE(acl6.matches(v6addr_2_48));
+ EXPECT_TRUE(acl6.matches(v6addr_3));
+
+ // Match on any address
+ IPCheck<GeneralAddress> acl7("any6");
+ EXPECT_TRUE(acl7.matches(v6addr_2));
+ EXPECT_TRUE(acl7.matches(v6addr_2_127));
+ EXPECT_TRUE(acl7.matches(v6addr_2_52));
+ EXPECT_TRUE(acl7.matches(v6addr_2_51));
+ EXPECT_TRUE(acl7.matches(v6addr_2_50));
+ EXPECT_TRUE(acl7.matches(v6addr_2_49));
+ EXPECT_TRUE(acl7.matches(v6addr_2_48));
+}
+
+// *** Mixed-mode tests - mainly to check that no exception is thrown ***
+
+TEST(IPCheck, MixedMode) {
+
+ // ACL has a V4 address specified, check against a V6 address.
+ IPCheck<GeneralAddress> acl1("192.0.2.255/24");
+ GeneralAddress test1(vector<uint8_t>(V6ADDR_1, V6ADDR_1 + IPV6_SIZE));
+ EXPECT_NO_THROW(acl1.matches(test1));
+ EXPECT_FALSE(acl1.matches(test1));
+
+ // Now the reverse - the ACL is specified with a V6 address.
+ IPCheck<GeneralAddress> acl2(V6ADDR_2_STRING);
+ GeneralAddress test2(0x12345678);
+ EXPECT_FALSE(acl2.matches(test2));
+
+ // Ensure only a V4 address matches "any4".
+ IPCheck<GeneralAddress> acl3("any4");
+ EXPECT_FALSE(acl3.matches(test1));
+ EXPECT_TRUE(acl3.matches(test2));
+
+ // ... and check the reverse
+ IPCheck<GeneralAddress> acl4("any6");
+ EXPECT_TRUE(acl4.matches(test1));
+ EXPECT_FALSE(acl4.matches(test2));
+
+ // Check where the bit pattern of an IPv4 address matches that of an IPv6
+ // one.
+ IPCheck<GeneralAddress> acl5("2001:db8::/32");
+ GeneralAddress test5(0x20010db8);
+ EXPECT_FALSE(acl5.matches(test5));
+
+ // ... and where the reverse is true. (2001:db8 corresponds to 32.1.13.184).
+ IPCheck<GeneralAddress> acl6("32.1.13.184");
+ GeneralAddress test6(vector<uint8_t>(V6ADDR_1, V6ADDR_1 + IPV6_SIZE));
+ EXPECT_FALSE(acl6.matches(test6));
+}
+} // Unnamed namespace
diff --git a/src/lib/acl/tests/loader_test.cc b/src/lib/acl/tests/loader_test.cc
index 92d40a8..4415081 100644
--- a/src/lib/acl/tests/loader_test.cc
+++ b/src/lib/acl/tests/loader_test.cc
@@ -12,28 +12,26 @@
// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
// PERFORMANCE OF THIS SOFTWARE.
-#include "logcheck.h"
+#include "creators.h"
#include <acl/loader.h>
#include <string>
#include <gtest/gtest.h>
using namespace std;
using namespace boost;
+using namespace isc::acl;
+using namespace isc::acl::tests;
+using isc::data::Element;
using isc::data::ConstElementPtr;
namespace {
-// Just for convenience, create JSON objects from JSON string
-ConstElementPtr el(const string& JSON) {
- return (isc::data::Element::fromJSON(JSON));
-}
-
// We don't use the EXPECT_THROW macro, as it doesn't allow us
// to examine the exception. We want to check the element is stored
// there as well.
void testActionLoaderException(const string& JSON) {
SCOPED_TRACE("Should throw with input: " + JSON);
- ConstElementPtr elem(el(JSON));
+ ConstElementPtr elem(Element::fromJSON(JSON));
try {
defaultActionLoader(elem);
FAIL() << "It did not throw";
@@ -48,9 +46,9 @@ void testActionLoaderException(const string& JSON) {
// Test the defaultActionLoader function
TEST(LoaderHelpers, DefaultActionLoader) {
// First the three valid inputs
- EXPECT_EQ(ACCEPT, defaultActionLoader(el("\"ACCEPT\"")));
- EXPECT_EQ(REJECT, defaultActionLoader(el("\"REJECT\"")));
- EXPECT_EQ(DROP, defaultActionLoader(el("\"DROP\"")));
+ EXPECT_EQ(ACCEPT, defaultActionLoader(Element::fromJSON("\"ACCEPT\"")));
+ EXPECT_EQ(REJECT, defaultActionLoader(Element::fromJSON("\"REJECT\"")));
+ EXPECT_EQ(DROP, defaultActionLoader(Element::fromJSON("\"DROP\"")));
// Now few invalid ones
// String, but unknown one
testActionLoaderException("\"UNKNOWN\"");
@@ -61,122 +59,6 @@ TEST(LoaderHelpers, DefaultActionLoader) {
testActionLoaderException("{}");
}
-// A check that doesn't check anything but remembers it's own name
-// and data
-class NamedCheck : public Check<Log> {
-public:
- NamedCheck(const string& name, ConstElementPtr data) :
- name_(name),
- data_(data)
- {}
- virtual bool matches(const Log&) const { return (true); }
- const string name_;
- const ConstElementPtr data_;
-};
-
-// The creator of NamedCheck
-class NamedCreator : public Loader<Log>::CheckCreator {
-public:
- NamedCreator(const string& name, bool abbreviatedList = true) :
- abbreviated_list_(abbreviatedList)
- {
- names_.push_back(name);
- }
- NamedCreator(const vector<string>& names) :
- names_(names),
- abbreviated_list_(true)
- {}
- vector<string> names() const {
- return (names_);
- }
- shared_ptr<Check<Log> > create(const string& name, ConstElementPtr data,
- const Loader<Log>&)
- {
- bool found(false);
- for (vector<string>::const_iterator i(names_.begin());
- i != names_.end(); ++i) {
- if (*i == name) {
- found = true;
- break;
- }
- }
- EXPECT_TRUE(found) << "Name " << name << " passed to creator which "
- "doesn't handle it.";
- return (shared_ptr<Check<Log> >(new NamedCheck(name, data)));
- }
- bool allowListAbbreviation() const {
- return (abbreviated_list_);
- }
-private:
- vector<string> names_;
- const bool abbreviated_list_;
-};
-
-// To be thrown in tests internally
-class TestCreatorError {};
-
-// This will throw every time it should create something
-class ThrowCreator : public Loader<Log>::CheckCreator {
-public:
- vector<string> names() const {
- vector<string> result;
- result.push_back("throw");
- return (result);
- }
- shared_ptr<Check<Log> > create(const string&, ConstElementPtr,
- const Loader<Log>&)
- {
- throw TestCreatorError();
- }
-};
-
-// This throws whenever the match is called on it
-class ThrowCheck : public Check<Log> {
-public:
- virtual bool matches(const Log&) const {
- throw TestCreatorError();
- }
-};
-
-// And creator for it
-class ThrowCheckCreator : public Loader<Log>::CheckCreator {
-public:
- vector<string> names() const {
- vector<string> result;
- result.push_back("throwcheck");
- return (result);
- }
- shared_ptr<Check<Log> > create(const string&, ConstElementPtr,
- const Loader<Log>&)
- {
- return (shared_ptr<Check<Log> >(new ThrowCheck()));
- }
-};
-
-class LogCreator : public Loader<Log>::CheckCreator {
-public:
- vector<string> names() const {
- vector<string> result;
- result.push_back("logcheck");
- return (result);
- }
- /*
- * For simplicity, we just take two values as a list, first is the
- * logging cell used, the second is result of the check. No error checking
- * is done, if there's bug in the test, it will throw TypeError for us.
- */
- shared_ptr<Check<Log> > create(const string&, ConstElementPtr definition,
- const Loader<Log>&)
- {
- vector<ConstElementPtr> list(definition->listValue());
- int logpos(list[0]->intValue());
- bool accept(list[1]->boolValue());
- return (shared_ptr<ConstCheck>(new ConstCheck(accept, logpos)));
- }
- // We take a list, so don't interpret it for us
- virtual bool allowListAbbreviation() const { return (false); }
-};
-
class LoaderTest : public ::testing::Test {
public:
LoaderTest() :
@@ -198,20 +80,26 @@ public:
EXPECT_NO_THROW(loader_.registerCreator(
namedCreator(name, abbreviatedList)));
}
- // Load a check and convert it to named check to examine it
- shared_ptr<NamedCheck> loadCheck(const string& definition) {
+ template<class Result> shared_ptr<Result> loadCheckAny(const string&
+ definition)
+ {
SCOPED_TRACE("Loading check " + definition);
shared_ptr<Check<Log> > loaded;
- EXPECT_NO_THROW(loaded = loader_.loadCheck(el(definition)));
- shared_ptr<NamedCheck> result(dynamic_pointer_cast<NamedCheck>(
+ EXPECT_NO_THROW(loaded = loader_.loadCheck(
+ Element::fromJSON(definition)));
+ shared_ptr<Result> result(dynamic_pointer_cast<Result>(
loaded));
EXPECT_TRUE(result);
return (result);
}
+ // Load a check and convert it to named check to examine it
+ shared_ptr<NamedCheck> loadCheck(const string& definition) {
+ return (loadCheckAny<NamedCheck>(definition));
+ }
// The loadCheck throws an exception
void checkException(const string& JSON) {
SCOPED_TRACE("Loading check exception: " + JSON);
- ConstElementPtr input(el(JSON));
+ ConstElementPtr input(Element::fromJSON(JSON));
// Not using EXPECT_THROW, we want to examine the exception
try {
loader_.loadCheck(input);
@@ -245,7 +133,7 @@ public:
SCOPED_TRACE("Running ACL for " + JSON);
aclSetup();
shared_ptr<ACL<Log> > acl;
- EXPECT_NO_THROW(acl = loader_.load(el(JSON)));
+ EXPECT_NO_THROW(acl = loader_.load(Element::fromJSON(JSON)));
EXPECT_EQ(expectedResult, acl->execute(log_));
log_.checkFirst(logged);
}
@@ -253,7 +141,21 @@ public:
void aclException(const string& JSON) {
SCOPED_TRACE("Trying to load bad " + JSON);
aclSetup();
- EXPECT_THROW(loader_.load(el(JSON)), LoaderError);
+ EXPECT_THROW(loader_.load(Element::fromJSON(JSON)), LoaderError);
+ }
+ // Check that the subexpression is NamedCheck with correct data
+ void isSubexprNamed(const CompoundCheck<Log>* compound, size_t index,
+ const string& name, ConstElementPtr data)
+ {
+ if (index < compound->getSubexpressions().size()) {
+ const NamedCheck*
+ check(dynamic_cast<const NamedCheck*>(compound->
+ getSubexpressions()
+ [index]));
+ ASSERT_TRUE(check) << "The subexpression is of different type";
+ EXPECT_EQ(name, check->name_);
+ EXPECT_TRUE(data->equals(*check->data_));
+ }
}
};
@@ -282,7 +184,7 @@ TEST_F(LoaderTest, SimpleCheckLoad) {
addNamed("name");
shared_ptr<NamedCheck> check(loadCheck("{\"name\": 42}"));
EXPECT_EQ("name", check->name_);
- EXPECT_TRUE(check->data_->equals(*el("42")));
+ EXPECT_TRUE(check->data_->equals(*Element::fromJSON("42")));
}
// As above, but there are multiple creators registered within the loader
@@ -291,7 +193,7 @@ TEST_F(LoaderTest, MultiCreatorCheckLoad) {
addNamed("name2");
shared_ptr<NamedCheck> check(loadCheck("{\"name2\": 42}"));
EXPECT_EQ("name2", check->name_);
- EXPECT_TRUE(check->data_->equals(*el("42")));
+ EXPECT_TRUE(check->data_->equals(*Element::fromJSON("42")));
}
// Similar to above, but there's a creator with multiple names
@@ -304,7 +206,7 @@ TEST_F(LoaderTest, MultiNameCheckLoad) {
new NamedCreator(names))));
shared_ptr<NamedCheck> check(loadCheck("{\"name3\": 42}"));
EXPECT_EQ("name3", check->name_);
- EXPECT_TRUE(check->data_->equals(*el("42")));
+ EXPECT_TRUE(check->data_->equals(*Element::fromJSON("42")));
}
// Invalid format is rejected
@@ -328,22 +230,71 @@ TEST_F(LoaderTest, UnkownName) {
// Exception from the creator is propagated
TEST_F(LoaderTest, CheckPropagate) {
loader_.registerCreator(shared_ptr<ThrowCreator>(new ThrowCreator()));
- EXPECT_THROW(loader_.loadCheck(el("{\"throw\": null}")), TestCreatorError);
+ EXPECT_THROW(loader_.loadCheck(Element::fromJSON("{\"throw\": null}")),
+ TestCreatorError);
}
-// The abbreviated form is not yet implemented
-// (we need the operators to be implemented)
+// The abbreviated form of check
TEST_F(LoaderTest, AndAbbrev) {
addNamed("name1");
addNamed("name2");
- EXPECT_THROW(loader_.loadCheck(el("{\"name1\": 1, \"name2\": 2}")),
- LoaderError);
+ shared_ptr<LogicOperator<AllOfSpec, Log> > oper(
+ loadCheckAny<LogicOperator<AllOfSpec, Log> >("{\"name1\": 1, \"name2\": 2}"));
+ // If we don't have anything loaded, the rest would crash. It is already
+ // reported from within loadCheckAny if it isn't loaded.
+ if (oper) {
+ // The subexpressions are correct
+ EXPECT_EQ(2, oper->getSubexpressions().size());
+ // Note: this test relies on the ordering in which map returns it's
+ // elements, which is in the lexicographical order of the strings.
+ // This is not required from our interface, but is easier to write
+ // the test.
+ isSubexprNamed(&*oper, 0, "name1", Element::fromJSON("1"));
+ isSubexprNamed(&*oper, 1, "name2", Element::fromJSON("2"));
+ }
}
+// The abbreviated form of parameters
TEST_F(LoaderTest, OrAbbrev) {
addNamed("name1");
- EXPECT_THROW(loader_.loadCheck(el("{\"name1\": [1, 2]}")),
- LoaderError);
+ shared_ptr<LogicOperator<AnyOfSpec, Log> > oper(
+ loadCheckAny<LogicOperator<AnyOfSpec, Log> >("{\"name1\": [1, 2]}"));
+ // If we don't have anything loaded, the rest would crash. It is already
+ // reported from within loadCheckAny if it isn't loaded.
+ if (oper) {
+ // The subexpressions are correct
+ EXPECT_EQ(2, oper->getSubexpressions().size());
+ isSubexprNamed(&*oper, 0, "name1", Element::fromJSON("1"));
+ isSubexprNamed(&*oper, 1, "name1", Element::fromJSON("2"));
+ }
+}
+
+// Combined abbreviated form, both at once
+
+// The abbreviated form of check
+TEST_F(LoaderTest, BothAbbrev) {
+ addNamed("name1");
+ addNamed("name2");
+ shared_ptr<LogicOperator<AllOfSpec, Log> > oper(
+ loadCheckAny<LogicOperator<AllOfSpec, Log> >("{\"name1\": 1, \"name2\": [3, 4]}"));
+ // If we don't have anything loaded, the rest would crash. It is already
+ // reported from within loadCheckAny if it isn't loaded.
+ if (oper) {
+ // The subexpressions are correct
+ ASSERT_EQ(2, oper->getSubexpressions().size());
+ // Note: this test relies on the ordering in which the map returns its
+ // elements, which is in the lexicographical order of the strings.
+ // This is not required from our interface, but is easier to write
+ // the test.
+ isSubexprNamed(&*oper, 0, "name1", Element::fromJSON("1"));
+ const LogicOperator<AnyOfSpec, Log>*
+ orOper(dynamic_cast<const LogicOperator<AnyOfSpec, Log>*>(
+ oper->getSubexpressions()[1]));
+ ASSERT_TRUE(orOper) << "Different type than AnyOf operator";
+ EXPECT_EQ(2, orOper->getSubexpressions().size());
+ isSubexprNamed(orOper, 0, "name2", Element::fromJSON("3"));
+ isSubexprNamed(orOper, 1, "name2", Element::fromJSON("4"));
+ }
}
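The three abbreviated-form tests above (AndAbbrev, OrAbbrev, BothAbbrev) pin down how the loader expands the JSON shorthand: a map holding several check names becomes an implicit all-of, and a list given as a single check's parameter becomes an implicit any-of over that check. In rough notation (a reading aid only, not the loader's input syntax):

    {"name1": 1, "name2": 2}        ->  ALL( name1(1), name2(2) )
    {"name1": [1, 2]}               ->  ANY( name1(1), name1(2) )
    {"name1": 1, "name2": [3, 4]}   ->  ALL( name1(1), ANY( name2(3), name2(4) ) )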
// But this is not abbreviated form, this should be passed directly to the
@@ -352,7 +303,7 @@ TEST_F(LoaderTest, ListCheck) {
addNamed("name1", false);
shared_ptr<NamedCheck> check(loadCheck("{\"name1\": [1, 2]}"));
EXPECT_EQ("name1", check->name_);
- EXPECT_TRUE(check->data_->equals(*el("[1, 2]")));
+ EXPECT_TRUE(check->data_->equals(*Element::fromJSON("[1, 2]")));
}
// Check the action key is ignored as it should be
@@ -360,7 +311,7 @@ TEST_F(LoaderTest, CheckNoAction) {
addNamed("name1");
shared_ptr<NamedCheck> check(loadCheck("{\"name1\": 1, \"action\": 2}"));
EXPECT_EQ("name1", check->name_);
- EXPECT_TRUE(check->data_->equals(*el("1")));
+ EXPECT_TRUE(check->data_->equals(*Element::fromJSON("1")));
}
// The empty ACL can be created and run, providing the default action
@@ -418,7 +369,9 @@ TEST_F(LoaderTest, NoAction) {
// Exceptions from check creation is propagated
TEST_F(LoaderTest, ACLPropagate) {
aclSetup();
- EXPECT_THROW(loader_.load(el("[{\"action\": \"ACCEPT\", \"throw\": 1}]")),
+ EXPECT_THROW(loader_.load(
+ Element::fromJSON(
+ "[{\"action\": \"ACCEPT\", \"throw\": 1}]")),
TestCreatorError);
}
diff --git a/src/lib/acl/tests/logcheck.h b/src/lib/acl/tests/logcheck.h
index c5e1bb1..424c53d 100644
--- a/src/lib/acl/tests/logcheck.h
+++ b/src/lib/acl/tests/logcheck.h
@@ -12,18 +12,18 @@
// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
// PERFORMANCE OF THIS SOFTWARE.
+#ifndef LOGCHECK_H
+#define LOGCHECK_H
+
#include <gtest/gtest.h>
#include <acl/acl.h>
#include <cassert>
-// This is not a public header, it is used only inside the tests. Therefore
-// we lower the standards a bit and use anonymous namespace in the header
-// and "using", just for convenience. This is just to share little bit of code
-// between multiple tests.
-using namespace isc::acl;
-using boost::shared_ptr;
+// This is not a public header; it is used only inside the tests.
-namespace {
+namespace isc {
+namespace acl {
+namespace tests {
// This is arbitrary guess of size for the log. If it's too small for your
// test, just make it bigger.
@@ -84,3 +84,11 @@ private:
};
}
+}
+}
+
+#endif
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/acl/tests/logic_check_test.cc b/src/lib/acl/tests/logic_check_test.cc
new file mode 100644
index 0000000..eec6d51
--- /dev/null
+++ b/src/lib/acl/tests/logic_check_test.cc
@@ -0,0 +1,245 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include "creators.h"
+#include <acl/logic_check.h>
+#include <typeinfo>
+#include <boost/shared_ptr.hpp> // for static_pointer_cast
+
+using namespace std;
+using namespace boost;
+using namespace isc::acl;
+using namespace isc::acl::tests;
+using isc::data::Element;
+
+namespace {
+
+// Test the defs in AnyOfSpec
+TEST(LogicOperators, AnyOfSpec) {
+ EXPECT_FALSE(AnyOfSpec::start());
+ EXPECT_FALSE(AnyOfSpec::terminate(false));
+ EXPECT_TRUE(AnyOfSpec::terminate(true));
+}
+
+// Test the defs in AllOfSpec
+TEST(LogicOperators, AllOfSpec) {
+ EXPECT_TRUE(AllOfSpec::start());
+ EXPECT_TRUE(AllOfSpec::terminate(false));
+ EXPECT_FALSE(AllOfSpec::terminate(true));
+}
+
+// Generic test of one check
+template<typename Mode>
+void
+testCheck(bool emptyResult) {
+ // It can be created
+ LogicOperator<Mode, Log> oper;
+ // It is empty by default
+ EXPECT_EQ(0, oper.getSubexpressions().size());
+ // An empty operator returns the mode's start value, passed in as emptyResult
+ Log log;
+ EXPECT_EQ(emptyResult, oper.matches(log));
+ log.checkFirst(0);
+ // Fill it with some subexpressions
+ typedef shared_ptr<ConstCheck> CheckPtr;
+ oper.addSubexpression(CheckPtr(new ConstCheck(emptyResult, 0)));
+ oper.addSubexpression(CheckPtr(new ConstCheck(emptyResult, 1)));
+ // Check what happens when only subexpressions returning the default result are there
+ EXPECT_EQ(2, oper.getSubexpressions().size());
+ EXPECT_EQ(emptyResult, oper.matches(log));
+ log.checkFirst(2);
+ oper.addSubexpression(CheckPtr(new ConstCheck(!emptyResult, 2)));
+ oper.addSubexpression(CheckPtr(new ConstCheck(!emptyResult, 3)));
+ // They are listed there
+ EXPECT_EQ(4, oper.getSubexpressions().size());
+ // Now the third subexpression terminates the evaluation: the first three
+ // will run, the fourth won't
+ EXPECT_EQ(!emptyResult, oper.matches(log));
+ log.checkFirst(3);
+}
+
+TEST(LogicOperators, AllOf) {
+ testCheck<AllOfSpec>(true);
+}
+
+TEST(LogicOperators, AnyOf) {
+ testCheck<AnyOfSpec>(false);
+}
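The Spec tests and testCheck() together describe the short-circuit contract: Mode::start() is the result of an empty operator, and Mode::terminate() reports when the remaining subexpressions can be skipped (ALL stops at the first false, ANY at the first true). A minimal sketch of an evaluator written purely against that contract (an assumed shape; the real LogicOperator in acl/logic_check.h stores Check objects, not plain bools):

    #include <cstddef>
    #include <vector>

    template<typename Mode>
    bool
    evaluate(const std::vector<bool>& results) {
        bool value = Mode::start();        // true for AllOfSpec, false for AnyOfSpec
        for (std::size_t i = 0; i < results.size(); ++i) {
            value = results[i];
            if (Mode::terminate(value)) {  // the outcome is fixed, stop early
                break;
            }
        }
        return (value);
    }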
+
+// Fixture for the tests of the creators
+class LogicCreatorTest : public ::testing::Test {
+private:
+ typedef shared_ptr<Loader<Log>::CheckCreator> CreatorPtr;
+public:
+ // Register some creators, both tested ones and some auxiliary ones for
+ // help
+ LogicCreatorTest():
+ loader_(REJECT)
+ {
+ loader_.registerCreator(CreatorPtr(new
+ LogicCreator<AnyOfSpec, Log>("ANY")));
+ loader_.registerCreator(CreatorPtr(new
+ LogicCreator<AllOfSpec, Log>("ALL")));
+ loader_.registerCreator(CreatorPtr(new ThrowCreator));
+ loader_.registerCreator(CreatorPtr(new LogCreator));
+ }
+ // To mark which parts of the check did run
+ Log log_;
+ // The loader
+ Loader<Log> loader_;
+ // Some convenience shortcut names
+ typedef LogicOperator<AnyOfSpec, Log> AnyOf;
+ typedef LogicOperator<AllOfSpec, Log> AllOf;
+ typedef shared_ptr<AnyOf> AnyOfPtr;
+ typedef shared_ptr<AllOf> AllOfPtr;
+ // Loads the JSON as a check and tries to convert it to the given check
+ // subclass
+ template<typename Result> shared_ptr<Result> load(const string& JSON) {
+ shared_ptr<Check<Log> > result;
+ EXPECT_NO_THROW(result = loader_.loadCheck(Element::fromJSON(JSON)));
+ /*
+ * Optimally, we would use a dynamic_pointer_cast here to both
+ * convert the pointer and to check the type is correct. However,
+ * clang++ seems to be confused by templates and creates two typeids
+ * for the same templated type (even with the same parameters),
+ * therefore considering the types different, even if they are the same.
+ * This leads to a false alarm in the test. Luckily, it generates the
+ * same name for both typeids, so we use them instead (which is enough
+ * to test the correct type of Check is returned). Then we can safely
+ * cast statically, as we don't use any kind of nasty things like
+ * multiple inheritance.
+ */
+ EXPECT_STREQ(typeid(Result).name(), typeid(*result.get()).name());
+ shared_ptr<Result>
+ resultConverted(static_pointer_cast<Result>(result));
+ EXPECT_NE(shared_ptr<Result>(), resultConverted);
+ return (resultConverted);
+ }
+};
+
+// Test it can load empty ones
+TEST_F(LogicCreatorTest, empty) {
+ AnyOfPtr emptyAny(load<AnyOf>("{\"ANY\": []}"));
+ EXPECT_EQ(0, emptyAny->getSubexpressions().size());
+ AllOfPtr emptyAll(load<AllOf>("{\"ALL\": []}"));
+ EXPECT_EQ(0, emptyAll->getSubexpressions().size());
+}
+
+// Test it rejects invalid inputs (not a list as a parameter)
+TEST_F(LogicCreatorTest, invalid) {
+ EXPECT_THROW(loader_.loadCheck(Element::fromJSON("{\"ANY\": null}")),
+ LoaderError);
+ EXPECT_THROW(loader_.loadCheck(Element::fromJSON("{\"ANY\": {}}")),
+ LoaderError);
+ EXPECT_THROW(loader_.loadCheck(Element::fromJSON("{\"ANY\": true}")),
+ LoaderError);
+ EXPECT_THROW(loader_.loadCheck(Element::fromJSON("{\"ANY\": 42}")),
+ LoaderError);
+ EXPECT_THROW(loader_.loadCheck(Element::fromJSON("{\"ANY\": \"hello\"}")),
+ LoaderError);
+ EXPECT_THROW(loader_.loadCheck(Element::fromJSON("{\"ALL\": null}")),
+ LoaderError);
+ EXPECT_THROW(loader_.loadCheck(Element::fromJSON("{\"ALL\": {}}")),
+ LoaderError);
+ EXPECT_THROW(loader_.loadCheck(Element::fromJSON("{\"ALL\": true}")),
+ LoaderError);
+ EXPECT_THROW(loader_.loadCheck(Element::fromJSON("{\"ALL\": 42}")),
+ LoaderError);
+ EXPECT_THROW(loader_.loadCheck(Element::fromJSON("{\"ALL\": \"hello\"}")),
+ LoaderError);
+}
+
+// Exceptions from subexpression creation aren't caught; they propagate
+TEST_F(LogicCreatorTest, propagate) {
+ EXPECT_THROW(loader_.loadCheck(
+ Element::fromJSON("{\"ANY\": [{\"throw\": null}]}")),
+ TestCreatorError);
+ EXPECT_THROW(loader_.loadCheck(
+ Element::fromJSON("{\"ALL\": [{\"throw\": null}]}")),
+ TestCreatorError);
+}
+
+// We can create more complex ANY check and run it correctly
+TEST_F(LogicCreatorTest, anyRun) {
+ AnyOfPtr any(load<AnyOf>("{\"ANY\": ["
+ " {\"logcheck\": [0, false]},"
+ " {\"logcheck\": [1, true]},"
+ " {\"logcheck\": [2, true]}"
+ "]}"));
+ EXPECT_EQ(3, any->getSubexpressions().size());
+ EXPECT_TRUE(any->matches(log_));
+ log_.checkFirst(2);
+}
+
+// We can create more complex ALL check and run it correctly
+TEST_F(LogicCreatorTest, allRun) {
+ AllOfPtr any(load<AllOf>("{\"ALL\": ["
+ " {\"logcheck\": [0, true]},"
+ " {\"logcheck\": [1, false]},"
+ " {\"logcheck\": [2, false]}"
+ "]}"));
+ EXPECT_EQ(3, any->getSubexpressions().size());
+ EXPECT_FALSE(any->matches(log_));
+ log_.checkFirst(2);
+}
+
+// Or is able to return false
+TEST_F(LogicCreatorTest, anyFalse) {
+ AnyOfPtr any(load<AnyOf>("{\"ANY\": ["
+ " {\"logcheck\": [0, false]},"
+ " {\"logcheck\": [1, false]},"
+ " {\"logcheck\": [2, false]}"
+ "]}"));
+ EXPECT_EQ(3, any->getSubexpressions().size());
+ EXPECT_FALSE(any->matches(log_));
+ log_.checkFirst(3);
+}
+
+// And is able to return true
+TEST_F(LogicCreatorTest, andTrue) {
+ AllOfPtr all(load<AllOf>("{\"ALL\": ["
+ " {\"logcheck\": [0, true]},"
+ " {\"logcheck\": [1, true]},"
+ " {\"logcheck\": [2, true]}"
+ "]}"));
+ EXPECT_EQ(3, all->getSubexpressions().size());
+ EXPECT_TRUE(all->matches(log_));
+ log_.checkFirst(3);
+}
+
+// We can nest them together
+TEST_F(LogicCreatorTest, nested) {
+ AllOfPtr all(load<AllOf>("{\"ALL\": ["
+ " {\"ANY\": ["
+ " {\"logcheck\": [0, true]},"
+ " {\"logcheck\": [2, true]},"
+ " ]},"
+ " {\"logcheck\": [1, false]}"
+ "]}"));
+ EXPECT_EQ(2, all->getSubexpressions().size());
+ /*
+ * This has the same problem as the load() function above, and we use the
+ * same solution here.
+ */
+ ASSERT_STREQ(typeid(LogicOperator<AnyOfSpec, Log>).name(),
+ typeid(*all->getSubexpressions()[0]).name());
+ const LogicOperator<AnyOfSpec, Log>*
+ any(static_cast<const LogicOperator<AnyOfSpec, Log>*>
+ (all->getSubexpressions()[0]));
+ EXPECT_EQ(2, any->getSubexpressions().size());
+ EXPECT_FALSE(all->matches(log_));
+ log_.checkFirst(2);
+}
+
+}
diff --git a/src/lib/acl/tests/run_unittests.cc b/src/lib/acl/tests/run_unittests.cc
index 61df6cf..8dc59a2 100644
--- a/src/lib/acl/tests/run_unittests.cc
+++ b/src/lib/acl/tests/run_unittests.cc
@@ -13,11 +13,12 @@
// PERFORMANCE OF THIS SOFTWARE.
#include <gtest/gtest.h>
+#include <log/logger_support.h>
#include <util/unittests/run_all.h>
int
main(int argc, char* argv[]) {
::testing::InitGoogleTest(&argc, argv);
+ isc::log::initLogger();
return (isc::util::unittests::run_all());
}
-
diff --git a/src/lib/asiodns/Makefile.am b/src/lib/asiodns/Makefile.am
index 2a6c3ac..2d246ef 100644
--- a/src/lib/asiodns/Makefile.am
+++ b/src/lib/asiodns/Makefile.am
@@ -8,13 +8,13 @@ AM_CPPFLAGS += -I$(top_srcdir)/src/lib/util -I$(top_builddir)/src/lib/util
AM_CXXFLAGS = $(B10_CXXFLAGS)
-CLEANFILES = *.gcno *.gcda asiodef.h asiodef.cc
+CLEANFILES = *.gcno *.gcda asiodns_messages.h asiodns_messages.cc
# Define rule to build logging source files from message file
-asiodef.h asiodef.cc: asiodef.mes
- $(top_builddir)/src/lib/log/compiler/message $(top_srcdir)/src/lib/asiodns/asiodef.mes
+asiodns_messages.h asiodns_messages.cc: asiodns_messages.mes
+ $(top_builddir)/src/lib/log/compiler/message $(top_srcdir)/src/lib/asiodns/asiodns_messages.mes
-BUILT_SOURCES = asiodef.h asiodef.cc
+BUILT_SOURCES = asiodns_messages.h asiodns_messages.cc
lib_LTLIBRARIES = libasiodns.la
libasiodns_la_SOURCES = dns_answer.h
@@ -26,9 +26,9 @@ libasiodns_la_SOURCES += tcp_server.cc tcp_server.h
libasiodns_la_SOURCES += udp_server.cc udp_server.h
libasiodns_la_SOURCES += io_fetch.cc io_fetch.h
-nodist_libasiodns_la_SOURCES = asiodef.cc asiodef.h
+nodist_libasiodns_la_SOURCES = asiodns_messages.cc asiodns_messages.h
-EXTRA_DIST = asiodef.mes
+EXTRA_DIST = asiodns_messages.mes
# Note: the ordering matters: -Wno-... must follow -Wextra (defined in
# B10_CXXFLAGS)
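The rewritten rule above runs the message compiler over asiodns_messages.mes to produce asiodns_messages.{h,cc}. Judging from how io_fetch.cc uses the resulting symbols further down, the generated header presumably declares one message-ID constant per "%" entry, inside the namespace named by $NAMESPACE; roughly (an illustrative shape only, not the literal compiler output):

    // asiodns_messages.h (sketch)
    namespace isc {
    namespace asiodns {

    extern const isc::log::MessageID ASIODNS_FETCH_COMPLETED;
    extern const isc::log::MessageID ASIODNS_OPEN_SOCKET;
    extern const isc::log::MessageID ASIODNS_READ_TIMEOUT;
    // ... one constant per message in the .mes file ...

    } // namespace asiodns
    } // namespace isc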
diff --git a/src/lib/asiodns/asiodef.mes b/src/lib/asiodns/asiodef.mes
deleted file mode 100644
index 4f4090d..0000000
--- a/src/lib/asiodns/asiodef.mes
+++ /dev/null
@@ -1,56 +0,0 @@
-# Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
-#
-# Permission to use, copy, modify, and/or distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
-# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
-# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
-# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
-# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
-# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
-# PERFORMANCE OF THIS SOFTWARE.
-
-$PREFIX ASIODNS_
-$NAMESPACE isc::asiodns
-
-% FETCHCOMP upstream fetch to %1(%2) has now completed
-A debug message, this records that the upstream fetch (a query made by the
-resolver on behalf of its client) to the specified address has completed.
-
-% FETCHSTOP upstream fetch to %1(%2) has been stopped
-An external component has requested the halting of an upstream fetch. This
-is an allowed operation, and the message should only appear if debug is
-enabled.
-
-% OPENSOCK error %1 opening %2 socket to %3(%4)
-The asynchronous I/O code encountered an error when trying to open a socket
-of the specified protocol in order to send a message to the target address.
-The number of the system error that cause the problem is given in the
-message.
-
-% RECVSOCK error %1 reading %2 data from %3(%4)
-The asynchronous I/O code encountered an error when trying to read data from
-the specified address on the given protocol. The number of the system
-error that cause the problem is given in the message.
-
-% SENDSOCK error %1 sending data using %2 to %3(%4)
-The asynchronous I/O code encountered an error when trying send data to
-the specified address on the given protocol. The the number of the system
-error that cause the problem is given in the message.
-
-% RECVTMO receive timeout while waiting for data from %1(%2)
-An upstream fetch from the specified address timed out. This may happen for
-any number of reasons and is most probably a problem at the remote server
-or a problem on the network. The message will only appear if debug is
-enabled.
-
-% UNKORIGIN unknown origin for ASIO error code %1 (protocol: %2, address %3)
-This message should not appear and indicates an internal error if it does.
-Please enter a bug report.
-
-% UNKRESULT unknown result (%1) when IOFetch::stop() was executed for I/O to %2(%3)
-The termination method of the resolver's upstream fetch class was called with
-an unknown result code (which is given in the message). This message should
-not appear and may indicate an internal error. Please enter a bug report.
diff --git a/src/lib/asiodns/asiodns_messages.mes b/src/lib/asiodns/asiodns_messages.mes
new file mode 100644
index 0000000..3e11ede
--- /dev/null
+++ b/src/lib/asiodns/asiodns_messages.mes
@@ -0,0 +1,56 @@
+# Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+$NAMESPACE isc::asiodns
+
+% ASIODNS_FETCH_COMPLETED upstream fetch to %1(%2) has now completed
+A debug message, this records that the upstream fetch (a query made by the
+resolver on behalf of its client) to the specified address has completed.
+
+% ASIODNS_FETCH_STOPPED upstream fetch to %1(%2) has been stopped
+An external component has requested the halting of an upstream fetch. This
+is an allowed operation, and the message should only appear if debug is
+enabled.
+
+% ASIODNS_OPEN_SOCKET error %1 opening %2 socket to %3(%4)
+The asynchronous I/O code encountered an error when trying to open a socket
+of the specified protocol in order to send a message to the target address.
+The number of the system error that causes the problem is given in the
+message.
+
+% ASIODNS_READ_DATA error %1 reading %2 data from %3(%4)
+The asynchronous I/O code encountered an error when trying to read data from
+the specified address on the given protocol. The number of the system
+error that causes the problem is given in the message.
+
+% ASIODNS_READ_TIMEOUT receive timeout while waiting for data from %1(%2)
+An upstream fetch from the specified address timed out. This may happen for
+any number of reasons and is most probably a problem at the remote server
+or a problem on the network. The message will only appear if debug is
+enabled.
+
+% ASIODNS_SEND_DATA error %1 sending data using %2 to %3(%4)
+The asynchronous I/O code encountered an error when trying to send data to
+the specified address on the given protocol. The number of the system
+error that causes the problem is given in the message.
+
+% ASIODNS_UNKNOWN_ORIGIN unknown origin for ASIO error code %1 (protocol: %2, address %3)
+An internal consistency check on the origin of a message from the
+asynchronous I/O module failed. This may indicate an internal error;
+please submit a bug report.
+
+% ASIODNS_UNKNOWN_RESULT unknown result (%1) when IOFetch::stop() was executed for I/O to %2(%3)
+An internal error indicating that the termination method of the resolver's
+upstream fetch class was called with an unknown result code (which is
+given in the message). Please submit a bug report.
diff --git a/src/lib/asiodns/io_fetch.cc b/src/lib/asiodns/io_fetch.cc
index 4b2edf9..31b5f50 100644
--- a/src/lib/asiodns/io_fetch.cc
+++ b/src/lib/asiodns/io_fetch.cc
@@ -41,7 +41,7 @@
#include <log/logger.h>
#include <log/macros.h>
-#include <asiodns/asiodef.h>
+#include <asiodns/asiodns_messages.h>
#include <asiodns/io_fetch.h>
#include <util/buffer.h>
@@ -158,7 +158,7 @@ struct IOFetchData {
stopped(false),
timeout(wait),
packet(false),
- origin(ASIODNS_UNKORIGIN),
+ origin(ASIODNS_UNKNOWN_ORIGIN),
staging(),
qid(QidGenerator::getInstance().generateQid())
{}
@@ -280,7 +280,7 @@ IOFetch::operator()(asio::error_code ec, size_t length) {
// Open a connection to the target system. For speed, if the operation
// is synchronous (i.e. UDP operation) we bypass the yield.
- data_->origin = ASIODNS_OPENSOCK;
+ data_->origin = ASIODNS_OPEN_SOCKET;
if (data_->socket->isOpenSynchronous()) {
data_->socket->open(data_->remote_snd.get(), *this);
} else {
@@ -290,7 +290,7 @@ IOFetch::operator()(asio::error_code ec, size_t length) {
do {
// Begin an asynchronous send, and then yield. When the send completes,
// we will resume immediately after this point.
- data_->origin = ASIODNS_SENDSOCK;
+ data_->origin = ASIODNS_SEND_DATA;
CORO_YIELD data_->socket->asyncSend(data_->msgbuf->getData(),
data_->msgbuf->getLength(), data_->remote_snd.get(), *this);
@@ -313,7 +313,7 @@ IOFetch::operator()(asio::error_code ec, size_t length) {
// received all the data before copying it back to the user's buffer.
// And we want to minimise the amount of copying...
- data_->origin = ASIODNS_RECVSOCK;
+ data_->origin = ASIODNS_READ_DATA;
data_->cumulative = 0; // No data yet received
data_->offset = 0; // First data into start of buffer
data_->received->clear(); // Clear the receive buffer
@@ -329,7 +329,7 @@ IOFetch::operator()(asio::error_code ec, size_t length) {
// Finished with this socket, so close it. This will not generate an
// I/O error, but reset the origin to unknown in case we change this.
- data_->origin = ASIODNS_UNKORIGIN;
+ data_->origin = ASIODNS_UNKNOWN_ORIGIN;
data_->socket->close();
/// We are done
@@ -367,13 +367,13 @@ IOFetch::stop(Result result) {
data_->stopped = true;
switch (result) {
case TIME_OUT:
- LOG_DEBUG(logger, DBG_COMMON, ASIODNS_RECVTMO).
+ LOG_DEBUG(logger, DBG_COMMON, ASIODNS_READ_TIMEOUT).
arg(data_->remote_snd->getAddress().toText()).
arg(data_->remote_snd->getPort());
break;
case SUCCESS:
- LOG_DEBUG(logger, DBG_ALL, ASIODNS_FETCHCOMP).
+ LOG_DEBUG(logger, DBG_ALL, ASIODNS_FETCH_COMPLETED).
arg(data_->remote_rcv->getAddress().toText()).
arg(data_->remote_rcv->getPort());
break;
@@ -382,13 +382,13 @@ IOFetch::stop(Result result) {
// Fetch has been stopped for some other reason. This is
// allowed but as it is unusual it is logged, but with a lower
// debug level than a timeout (which is totally normal).
- LOG_DEBUG(logger, DBG_IMPORTANT, ASIODNS_FETCHSTOP).
+ LOG_DEBUG(logger, DBG_IMPORTANT, ASIODNS_FETCH_STOPPED).
arg(data_->remote_snd->getAddress().toText()).
arg(data_->remote_snd->getPort());
break;
default:
- LOG_ERROR(logger, ASIODNS_UNKRESULT).
+ LOG_ERROR(logger, ASIODNS_UNKNOWN_RESULT).
arg(data_->remote_snd->getAddress().toText()).
arg(data_->remote_snd->getPort());
}
@@ -412,10 +412,10 @@ IOFetch::stop(Result result) {
void IOFetch::logIOFailure(asio::error_code ec) {
// Should only get here with a known error code.
- assert((data_->origin == ASIODNS_OPENSOCK) ||
- (data_->origin == ASIODNS_SENDSOCK) ||
- (data_->origin == ASIODNS_RECVSOCK) ||
- (data_->origin == ASIODNS_UNKORIGIN));
+ assert((data_->origin == ASIODNS_OPEN_SOCKET) ||
+ (data_->origin == ASIODNS_SEND_DATA) ||
+ (data_->origin == ASIODNS_READ_DATA) ||
+ (data_->origin == ASIODNS_UNKNOWN_ORIGIN));
static const char* PROTOCOL[2] = {"TCP", "UDP"};
LOG_ERROR(logger, data_->origin).arg(ec.value()).
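The hunks above show the "origin" pattern in io_fetch.cc: the message ID for the operation currently in flight is remembered in data_->origin, and a single failure path (logIOFailure()) then logs that ID directly, so the error report names the phase that failed. Schematically (simplified, not the literal code):

    // data_->origin tracks the current phase; logIOFailure() reports it.
    //   data_->origin = ASIODNS_OPEN_SOCKET;  ... open the socket ...
    //   data_->origin = ASIODNS_SEND_DATA;    ... send the query ...
    //   data_->origin = ASIODNS_READ_DATA;    ... receive the response ...
    //   on failure: LOG_ERROR(logger, data_->origin).arg(...).arg(...);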
diff --git a/src/lib/asiolink/io_endpoint.h b/src/lib/asiolink/io_endpoint.h
index 756fa3b..11ea97b 100644
--- a/src/lib/asiolink/io_endpoint.h
+++ b/src/lib/asiolink/io_endpoint.h
@@ -20,6 +20,8 @@
// See the description of the namespace below.
#include <unistd.h> // for some network system calls
+#include <sys/socket.h> // for sockaddr
+
#include <functional>
#include <string>
@@ -90,6 +92,44 @@ public:
/// \brief Returns the address family of the endpoint.
virtual short getFamily() const = 0;
+ /// \brief Returns the address of the endpoint in the form of sockaddr
+ /// structure.
+ ///
+ /// The actual instance referenced by the returned value of this method
+ /// is of per address family structure: For IPv4 (AF_INET), it's
+ /// \c sockaddr_in; for IPv6 (AF_INET6), it's \c sockaddr_in6.
+ /// The corresponding port and address members of the underlying structure
+ /// will be set in the network byte order.
+ ///
+ /// This method is "redundant" in that all information to construct the
+ /// \c sockaddr is available via the other "get" methods.
+ /// It is still defined for performance sensitive applications that need
+ /// to get the address information, such as for address based access
+ /// control at a high throughput. Internally it is implemented with
+ /// minimum overhead such as data copy (this is another reason why this
+ /// method returns a reference).
+ ///
+ /// As a tradeoff, this method is more fragile; it assumes that the
+ /// underlying ASIO implementation stores the address information in
+ /// the form of \c sockaddr and it can be accessed in an efficient way.
+ /// This is the case as of this writing, but if the underlying
+ /// implementation changes this method may become much slower or its
+ /// interface may have to be changed, too.
+ ///
+ /// It is therefore discouraged for normal applications to use this
+ /// method. Unless the application is very performance sensitive, it
+ /// should use the other "get" method to retrieve specific information
+ /// of the endpoint.
+ ///
+ /// The returned reference is only valid while the corresponding
+ /// \c IOEndpoint is valid. Once it's destructed the reference will
+ /// become invalid.
+ ///
+ /// \exception None
+ /// \return Reference to a \c sockaddr structure corresponding to the
+ /// endpoint.
+ virtual const struct sockaddr& getSockAddr() const = 0;
+
bool operator==(const IOEndpoint& other) const;
bool operator!=(const IOEndpoint& other) const;
@@ -121,3 +161,7 @@ public:
} // namespace asiolink
} // namespace isc
#endif // __IO_ENDPOINT_H
+
+// Local Variables:
+// mode: c++
+// End:
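The new getSockAddr() method documented above returns a family-specific structure (sockaddr_in for AF_INET, sockaddr_in6 for AF_INET6) with its members already in network byte order, so a caller branches on the family before casting. A minimal usage sketch (hypothetical caller code, not part of the patch), bearing in mind that the documentation discourages this for anything that is not performance critical:

    #include <stdint.h>
    #include <netinet/in.h>
    #include <asiolink/io_endpoint.h>

    // Extract the (network-byte-order) port via getSockAddr().
    uint16_t
    rawPortOf(const isc::asiolink::IOEndpoint& ep) {
        const struct sockaddr& sa = ep.getSockAddr();
        if (sa.sa_family == AF_INET) {
            const struct sockaddr_in* sin =
                reinterpret_cast<const struct sockaddr_in*>(&sa);
            return (sin->sin_port);    // still in network byte order
        }
        const struct sockaddr_in6* sin6 =
            reinterpret_cast<const struct sockaddr_in6*>(&sa);
        return (sin6->sin6_port);      // still in network byte order
    }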
diff --git a/src/lib/asiolink/tcp_endpoint.h b/src/lib/asiolink/tcp_endpoint.h
index 3e420f3..a54f6b2 100644
--- a/src/lib/asiolink/tcp_endpoint.h
+++ b/src/lib/asiolink/tcp_endpoint.h
@@ -84,6 +84,10 @@ public:
return (asio_endpoint_.address());
}
+ virtual const struct sockaddr& getSockAddr() const {
+ return (*asio_endpoint_.data());
+ }
+
virtual uint16_t getPort() const {
return (asio_endpoint_.port());
}
@@ -113,3 +117,7 @@ private:
} // namespace asiolink
} // namespace isc
#endif // __TCP_ENDPOINT_H
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/asiolink/tests/io_endpoint_unittest.cc b/src/lib/asiolink/tests/io_endpoint_unittest.cc
index ce21fde..f0279d1 100644
--- a/src/lib/asiolink/tests/io_endpoint_unittest.cc
+++ b/src/lib/asiolink/tests/io_endpoint_unittest.cc
@@ -15,14 +15,25 @@
#include <config.h>
#include <gtest/gtest.h>
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <netdb.h>
+#include <string.h>
+
+#include <boost/shared_ptr.hpp>
+
#include <asiolink/io_endpoint.h>
#include <asiolink/io_error.h>
+using boost::shared_ptr;
using namespace isc::asiolink;
+namespace {
+typedef shared_ptr<const IOEndpoint> ConstIOEndpointPtr;
+
TEST(IOEndpointTest, createUDPv4) {
- const IOEndpoint* ep;
- ep = IOEndpoint::create(IPPROTO_UDP, IOAddress("192.0.2.1"), 53210);
+ ConstIOEndpointPtr ep(IOEndpoint::create(IPPROTO_UDP,
+ IOAddress("192.0.2.1"), 53210));
EXPECT_EQ("192.0.2.1", ep->getAddress().toText());
EXPECT_EQ(53210, ep->getPort());
EXPECT_EQ(AF_INET, ep->getFamily());
@@ -31,8 +42,8 @@ TEST(IOEndpointTest, createUDPv4) {
}
TEST(IOEndpointTest, createTCPv4) {
- const IOEndpoint* ep;
- ep = IOEndpoint::create(IPPROTO_TCP, IOAddress("192.0.2.1"), 5301);
+ ConstIOEndpointPtr ep(IOEndpoint::create(IPPROTO_TCP,
+ IOAddress("192.0.2.1"), 5301));
EXPECT_EQ("192.0.2.1", ep->getAddress().toText());
EXPECT_EQ(5301, ep->getPort());
EXPECT_EQ(AF_INET, ep->getFamily());
@@ -41,8 +52,9 @@ TEST(IOEndpointTest, createTCPv4) {
}
TEST(IOEndpointTest, createUDPv6) {
- const IOEndpoint* ep;
- ep = IOEndpoint::create(IPPROTO_UDP, IOAddress("2001:db8::1234"), 5302);
+ ConstIOEndpointPtr ep(IOEndpoint::create(IPPROTO_UDP,
+ IOAddress("2001:db8::1234"),
+ 5302));
EXPECT_EQ("2001:db8::1234", ep->getAddress().toText());
EXPECT_EQ(5302, ep->getPort());
EXPECT_EQ(AF_INET6, ep->getFamily());
@@ -51,8 +63,9 @@ TEST(IOEndpointTest, createUDPv6) {
}
TEST(IOEndpointTest, createTCPv6) {
- const IOEndpoint* ep;
- ep = IOEndpoint::create(IPPROTO_TCP, IOAddress("2001:db8::1234"), 5303);
+ ConstIOEndpointPtr ep(IOEndpoint::create(IPPROTO_TCP,
+ IOAddress("2001:db8::1234"),
+ 5303));
EXPECT_EQ("2001:db8::1234", ep->getAddress().toText());
EXPECT_EQ(5303, ep->getPort());
EXPECT_EQ(AF_INET6, ep->getFamily());
@@ -61,23 +74,55 @@ TEST(IOEndpointTest, createTCPv6) {
}
TEST(IOEndpointTest, equality) {
- std::vector<const IOEndpoint *> epv;
- epv.push_back(IOEndpoint::create(IPPROTO_TCP, IOAddress("2001:db8::1234"), 5303));
- epv.push_back(IOEndpoint::create(IPPROTO_UDP, IOAddress("2001:db8::1234"), 5303));
- epv.push_back(IOEndpoint::create(IPPROTO_TCP, IOAddress("2001:db8::1234"), 5304));
- epv.push_back(IOEndpoint::create(IPPROTO_UDP, IOAddress("2001:db8::1234"), 5304));
- epv.push_back(IOEndpoint::create(IPPROTO_TCP, IOAddress("2001:db8::1235"), 5303));
- epv.push_back(IOEndpoint::create(IPPROTO_UDP, IOAddress("2001:db8::1235"), 5303));
- epv.push_back(IOEndpoint::create(IPPROTO_TCP, IOAddress("2001:db8::1235"), 5304));
- epv.push_back(IOEndpoint::create(IPPROTO_UDP, IOAddress("2001:db8::1235"), 5304));
- epv.push_back(IOEndpoint::create(IPPROTO_TCP, IOAddress("192.0.2.1"), 5303));
- epv.push_back(IOEndpoint::create(IPPROTO_UDP, IOAddress("192.0.2.1"), 5303));
- epv.push_back(IOEndpoint::create(IPPROTO_TCP, IOAddress("192.0.2.1"), 5304));
- epv.push_back(IOEndpoint::create(IPPROTO_UDP, IOAddress("192.0.2.1"), 5304));
- epv.push_back(IOEndpoint::create(IPPROTO_TCP, IOAddress("192.0.2.2"), 5303));
- epv.push_back(IOEndpoint::create(IPPROTO_UDP, IOAddress("192.0.2.2"), 5303));
- epv.push_back(IOEndpoint::create(IPPROTO_TCP, IOAddress("192.0.2.2"), 5304));
- epv.push_back(IOEndpoint::create(IPPROTO_UDP, IOAddress("192.0.2.2"), 5304));
+ std::vector<ConstIOEndpointPtr> epv;
+ epv.push_back(ConstIOEndpointPtr(
+ IOEndpoint::create(IPPROTO_TCP,
+ IOAddress("2001:db8::1234"), 5303)));
+ epv.push_back(ConstIOEndpointPtr(
+ IOEndpoint::create(IPPROTO_UDP,
+ IOAddress("2001:db8::1234"), 5303)));
+ epv.push_back(ConstIOEndpointPtr(
+ IOEndpoint::create(IPPROTO_TCP,
+ IOAddress("2001:db8::1234"), 5304)));
+ epv.push_back(ConstIOEndpointPtr(
+ IOEndpoint::create(IPPROTO_UDP,
+ IOAddress("2001:db8::1234"), 5304)));
+ epv.push_back(ConstIOEndpointPtr(
+ IOEndpoint::create(IPPROTO_TCP,
+ IOAddress("2001:db8::1235"), 5303)));
+ epv.push_back(ConstIOEndpointPtr(
+ IOEndpoint::create(IPPROTO_UDP,
+ IOAddress("2001:db8::1235"), 5303)));
+ epv.push_back(ConstIOEndpointPtr(
+ IOEndpoint::create(IPPROTO_TCP,
+ IOAddress("2001:db8::1235"), 5304)));
+ epv.push_back(ConstIOEndpointPtr(
+ IOEndpoint::create(IPPROTO_UDP,
+ IOAddress("2001:db8::1235"), 5304)));
+ epv.push_back(ConstIOEndpointPtr(
+ IOEndpoint::create(IPPROTO_TCP,
+ IOAddress("192.0.2.1"), 5303)));
+ epv.push_back(ConstIOEndpointPtr(
+ IOEndpoint::create(IPPROTO_UDP,
+ IOAddress("192.0.2.1"), 5303)));
+ epv.push_back(ConstIOEndpointPtr(
+ IOEndpoint::create(IPPROTO_TCP,
+ IOAddress("192.0.2.1"), 5304)));
+ epv.push_back(ConstIOEndpointPtr(
+ IOEndpoint::create(IPPROTO_UDP,
+ IOAddress("192.0.2.1"), 5304)));
+ epv.push_back(ConstIOEndpointPtr(
+ IOEndpoint::create(IPPROTO_TCP,
+ IOAddress("192.0.2.2"), 5303)));
+ epv.push_back(ConstIOEndpointPtr(
+ IOEndpoint::create(IPPROTO_UDP,
+ IOAddress("192.0.2.2"), 5303)));
+ epv.push_back(ConstIOEndpointPtr(
+ IOEndpoint::create(IPPROTO_TCP,
+ IOAddress("192.0.2.2"), 5304)));
+ epv.push_back(ConstIOEndpointPtr(
+ IOEndpoint::create(IPPROTO_UDP,
+ IOAddress("192.0.2.2"), 5304)));
for (size_t i = 0; i < epv.size(); ++i) {
for (size_t j = 0; j < epv.size(); ++j) {
@@ -92,23 +137,55 @@ TEST(IOEndpointTest, equality) {
// Create a second array with exactly the same values. We use create()
// again to make sure we get different endpoints
- std::vector<const IOEndpoint *> epv2;
- epv2.push_back(IOEndpoint::create(IPPROTO_TCP, IOAddress("2001:db8::1234"), 5303));
- epv2.push_back(IOEndpoint::create(IPPROTO_UDP, IOAddress("2001:db8::1234"), 5303));
- epv2.push_back(IOEndpoint::create(IPPROTO_TCP, IOAddress("2001:db8::1234"), 5304));
- epv2.push_back(IOEndpoint::create(IPPROTO_UDP, IOAddress("2001:db8::1234"), 5304));
- epv2.push_back(IOEndpoint::create(IPPROTO_TCP, IOAddress("2001:db8::1235"), 5303));
- epv2.push_back(IOEndpoint::create(IPPROTO_UDP, IOAddress("2001:db8::1235"), 5303));
- epv2.push_back(IOEndpoint::create(IPPROTO_TCP, IOAddress("2001:db8::1235"), 5304));
- epv2.push_back(IOEndpoint::create(IPPROTO_UDP, IOAddress("2001:db8::1235"), 5304));
- epv2.push_back(IOEndpoint::create(IPPROTO_TCP, IOAddress("192.0.2.1"), 5303));
- epv2.push_back(IOEndpoint::create(IPPROTO_UDP, IOAddress("192.0.2.1"), 5303));
- epv2.push_back(IOEndpoint::create(IPPROTO_TCP, IOAddress("192.0.2.1"), 5304));
- epv2.push_back(IOEndpoint::create(IPPROTO_UDP, IOAddress("192.0.2.1"), 5304));
- epv2.push_back(IOEndpoint::create(IPPROTO_TCP, IOAddress("192.0.2.2"), 5303));
- epv2.push_back(IOEndpoint::create(IPPROTO_UDP, IOAddress("192.0.2.2"), 5303));
- epv2.push_back(IOEndpoint::create(IPPROTO_TCP, IOAddress("192.0.2.2"), 5304));
- epv2.push_back(IOEndpoint::create(IPPROTO_UDP, IOAddress("192.0.2.2"), 5304));
+ std::vector<ConstIOEndpointPtr> epv2;
+ epv2.push_back(ConstIOEndpointPtr(
+ IOEndpoint::create(IPPROTO_TCP,
+ IOAddress("2001:db8::1234"), 5303)));
+ epv2.push_back(ConstIOEndpointPtr(
+ IOEndpoint::create(IPPROTO_UDP,
+ IOAddress("2001:db8::1234"), 5303)));
+ epv2.push_back(ConstIOEndpointPtr(
+ IOEndpoint::create(IPPROTO_TCP,
+ IOAddress("2001:db8::1234"), 5304)));
+ epv2.push_back(ConstIOEndpointPtr(
+ IOEndpoint::create(IPPROTO_UDP,
+ IOAddress("2001:db8::1234"), 5304)));
+ epv2.push_back(ConstIOEndpointPtr(
+ IOEndpoint::create(IPPROTO_TCP,
+ IOAddress("2001:db8::1235"), 5303)));
+ epv2.push_back(ConstIOEndpointPtr(
+ IOEndpoint::create(IPPROTO_UDP,
+ IOAddress("2001:db8::1235"), 5303)));
+ epv2.push_back(ConstIOEndpointPtr(
+ IOEndpoint::create(IPPROTO_TCP,
+ IOAddress("2001:db8::1235"), 5304)));
+ epv2.push_back(ConstIOEndpointPtr(
+ IOEndpoint::create(IPPROTO_UDP,
+ IOAddress("2001:db8::1235"), 5304)));
+ epv2.push_back(ConstIOEndpointPtr(
+ IOEndpoint::create(IPPROTO_TCP,
+ IOAddress("192.0.2.1"), 5303)));
+ epv2.push_back(ConstIOEndpointPtr(
+ IOEndpoint::create(IPPROTO_UDP,
+ IOAddress("192.0.2.1"), 5303)));
+ epv2.push_back(ConstIOEndpointPtr(
+ IOEndpoint::create(IPPROTO_TCP, IOAddress("192.0.2.1"),
+ 5304)));
+ epv2.push_back(ConstIOEndpointPtr(
+ IOEndpoint::create(IPPROTO_UDP, IOAddress("192.0.2.1"),
+ 5304)));
+ epv2.push_back(ConstIOEndpointPtr(
+ IOEndpoint::create(IPPROTO_TCP, IOAddress("192.0.2.2"),
+ 5303)));
+ epv2.push_back(ConstIOEndpointPtr(
+ IOEndpoint::create(IPPROTO_UDP, IOAddress("192.0.2.2"),
+ 5303)));
+ epv2.push_back(ConstIOEndpointPtr(
+ IOEndpoint::create(IPPROTO_TCP, IOAddress("192.0.2.2"),
+ 5304)));
+ epv2.push_back(ConstIOEndpointPtr(
+ IOEndpoint::create(IPPROTO_UDP, IOAddress("192.0.2.2"),
+ 5304)));
for (size_t i = 0; i < epv.size(); ++i) {
EXPECT_TRUE(*epv[i] == *epv2[i]);
@@ -122,3 +199,46 @@ TEST(IOEndpointTest, createIPProto) {
IOError);
}
+void
+sockAddrMatch(const struct sockaddr& actual_sa,
+ const char* const expected_addr_text,
+ const char* const expected_port_text)
+{
+ struct addrinfo hints;
+ memset(&hints, 0, sizeof(hints));
+ hints.ai_family = AF_UNSPEC;
+ hints.ai_socktype = SOCK_DGRAM; // this shouldn't matter
+ hints.ai_flags = AI_NUMERICHOST | AI_NUMERICSERV;
+
+ struct addrinfo* res;
+ ASSERT_EQ(0, getaddrinfo(expected_addr_text, expected_port_text, &hints,
+ &res));
+ EXPECT_EQ(res->ai_family, actual_sa.sa_family);
+#ifdef HAVE_SA_LEN
+ // ASIO doesn't seem to set sa_len, so copy the actual value into the expected structure
+ res->ai_addr->sa_len = actual_sa.sa_len;
+#endif
+ EXPECT_EQ(0, memcmp(res->ai_addr, &actual_sa, res->ai_addrlen));
+ freeaddrinfo(res);
+}
+
+TEST(IOEndpointTest, getSockAddr) {
+ // UDP/IPv4
+ ConstIOEndpointPtr ep(IOEndpoint::create(IPPROTO_UDP,
+ IOAddress("192.0.2.1"), 53210));
+ sockAddrMatch(ep->getSockAddr(), "192.0.2.1", "53210");
+
+ // UDP/IPv6
+ ep.reset(IOEndpoint::create(IPPROTO_UDP, IOAddress("2001:db8::53"), 53));
+ sockAddrMatch(ep->getSockAddr(), "2001:db8::53", "53");
+
+ // TCP/IPv4
+ ep.reset(IOEndpoint::create(IPPROTO_TCP, IOAddress("192.0.2.2"), 53211));
+ sockAddrMatch(ep->getSockAddr(), "192.0.2.2", "53211");
+
+ // TCP/IPv6
+ ep.reset(IOEndpoint::create(IPPROTO_UDP, IOAddress("2001:db8::5300"), 35));
+ sockAddrMatch(ep->getSockAddr(), "2001:db8::5300", "35");
+}
+
+}
diff --git a/src/lib/asiolink/udp_endpoint.h b/src/lib/asiolink/udp_endpoint.h
index 5c8a1fe..c5ba3bd 100644
--- a/src/lib/asiolink/udp_endpoint.h
+++ b/src/lib/asiolink/udp_endpoint.h
@@ -84,6 +84,10 @@ public:
return (asio_endpoint_.address());
}
+ virtual const struct sockaddr& getSockAddr() const {
+ return (*asio_endpoint_.data());
+ }
+
virtual uint16_t getPort() const {
return (asio_endpoint_.port());
}
@@ -113,3 +117,7 @@ private:
} // namespace asiolink
} // namespace isc
#endif // __UDP_ENDPOINT_H
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/cc/data.h b/src/lib/cc/data.h
index 0a363f4..5c731e6 100644
--- a/src/lib/cc/data.h
+++ b/src/lib/cc/data.h
@@ -479,7 +479,7 @@ public:
return (true);
}
using Element::setValue;
- bool setValue(std::map<std::string, ConstElementPtr>& v) {
+ bool setValue(const std::map<std::string, ConstElementPtr>& v) {
m = v;
return (true);
}
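
A purely illustrative sketch (assuming only the Element API visible in this diff) of why the const-reference overload matters: it lets a caller shallow-copy the map of a const element and then override a single key, which is the pattern getRelatedLoggers() below relies on. The helper name is made up for the example.

#include <string>

#include <cc/data.h>

using isc::data::Element;
using isc::data::ElementPtr;
using isc::data::ConstElementPtr;

// Shallow-copy a map element and replace its "name" entry. With the old
// non-const signature, passing orig->mapValue() here would not compile
// when 'orig' is a ConstElementPtr.
ElementPtr
copyWithName(ConstElementPtr orig, const std::string& new_name) {
    ElementPtr copy(Element::createMap());
    copy->setValue(orig->mapValue());
    copy->set("name", Element::create(new_name));
    return (copy);
}
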
diff --git a/src/lib/config/Makefile.am b/src/lib/config/Makefile.am
index 52337ad..500ff12 100644
--- a/src/lib/config/Makefile.am
+++ b/src/lib/config/Makefile.am
@@ -6,10 +6,10 @@ AM_CPPFLAGS += -I$(top_srcdir)/src/lib/log -I$(top_builddir)/src/lib/log
AM_CPPFLAGS += $(BOOST_INCLUDES)
# Define rule to build logging source files from message file
-configdef.h configdef.cc: configdef.mes
- $(top_builddir)/src/lib/log/compiler/message $(top_srcdir)/src/lib/config/configdef.mes
+config_messages.h config_messages.cc: config_messages.mes
+ $(top_builddir)/src/lib/log/compiler/message $(top_srcdir)/src/lib/config/config_messages.mes
-BUILT_SOURCES = configdef.h configdef.cc
+BUILT_SOURCES = config_messages.h config_messages.cc
lib_LTLIBRARIES = libcfgclient.la
libcfgclient_la_SOURCES = config_data.h config_data.cc
@@ -17,9 +17,9 @@ libcfgclient_la_SOURCES += module_spec.h module_spec.cc
libcfgclient_la_SOURCES += ccsession.cc ccsession.h
libcfgclient_la_SOURCES += config_log.h config_log.cc
-nodist_libcfgclient_la_SOURCES = configdef.h configdef.cc
+nodist_libcfgclient_la_SOURCES = config_messages.h config_messages.cc
# The message file should be in the distribution.
-EXTRA_DIST = configdef.mes
+EXTRA_DIST = config_messages.mes
-CLEANFILES = *.gcno *.gcda configdef.h configdef.cc
+CLEANFILES = *.gcno *.gcda config_messages.h config_messages.cc
diff --git a/src/lib/config/ccsession.cc b/src/lib/config/ccsession.cc
index dd2be3d..6b094ec 100644
--- a/src/lib/config/ccsession.cc
+++ b/src/lib/config/ccsession.cc
@@ -23,6 +23,7 @@
#include <fstream>
#include <sstream>
#include <cerrno>
+#include <set>
#include <boost/bind.hpp>
#include <boost/foreach.hpp>
@@ -38,6 +39,7 @@
#include <log/logger_support.h>
#include <log/logger_specification.h>
#include <log/logger_manager.h>
+#include <log/logger_name.h>
using namespace std;
@@ -213,7 +215,8 @@ readLoggersConf(std::vector<isc::log::LoggerSpecification>& specs,
ConstElementPtr logger,
const ConfigData& config_data)
{
- const std::string lname = logger->get("name")->stringValue();
+ std::string lname = logger->get("name")->stringValue();
+
ConstElementPtr severity_el = getValueOrDefault(logger,
"severity", config_data,
"loggers/severity");
@@ -246,6 +249,50 @@ readLoggersConf(std::vector<isc::log::LoggerSpecification>& specs,
} // end anonymous namespace
+
+ConstElementPtr
+getRelatedLoggers(ConstElementPtr loggers) {
+ // Keep a list of names for easier lookup later
+ std::set<std::string> our_names;
+ const std::string& root_name = isc::log::getRootLoggerName();
+
+ ElementPtr result = isc::data::Element::createList();
+
+ BOOST_FOREACH(ConstElementPtr cur_logger, loggers->listValue()) {
+ const std::string cur_name = cur_logger->get("name")->stringValue();
+ if (cur_name == root_name || cur_name.find(root_name + ".") == 0) {
+ our_names.insert(cur_name);
+ result->add(cur_logger);
+ }
+ }
+
+ // now find the * names
+ BOOST_FOREACH(ConstElementPtr cur_logger, loggers->listValue()) {
+ std::string cur_name = cur_logger->get("name")->stringValue();
+ // if name is '*', or starts with '*.', replace * with root
+ // logger name
+ if (cur_name == "*" || cur_name.length() > 1 &&
+ cur_name[0] == '*' && cur_name[1] == '.') {
+
+ cur_name = root_name + cur_name.substr(1);
+ // now add it to the result list, but only if a logger with
+ // that name was not configured explicitly
+ if (our_names.find(cur_name) == our_names.end()) {
+ // we substitute the name here already, but as
+ // we are dealing with consts, we copy the data
+ ElementPtr new_logger(Element::createMap());
+ // since we'll only be updating one first-level element,
+ // and we return as const again, a shallow map copy is
+ // enough
+ new_logger->setValue(cur_logger->mapValue());
+ new_logger->set("name", Element::create(cur_name));
+ result->add(new_logger);
+ }
+ }
+ }
+ return (result);
+}
+
void
default_logconfig_handler(const std::string& module_name,
ConstElementPtr new_config,
@@ -255,8 +302,9 @@ default_logconfig_handler(const std::string& module_name,
std::vector<isc::log::LoggerSpecification> specs;
if (new_config->contains("loggers")) {
+ ConstElementPtr loggers = getRelatedLoggers(new_config->get("loggers"));
BOOST_FOREACH(ConstElementPtr logger,
- new_config->get("loggers")->listValue()) {
+ loggers->listValue()) {
readLoggersConf(specs, logger, config_data);
}
}
@@ -274,7 +322,7 @@ ModuleCCSession::readModuleSpecification(const std::string& filename) {
// this file should be declared in a @something@ directive
file.open(filename.c_str());
if (!file) {
- LOG_ERROR(config_logger, CONFIG_FOPEN_ERR).arg(filename).arg(strerror(errno));
+ LOG_ERROR(config_logger, CONFIG_OPEN_FAIL).arg(filename).arg(strerror(errno));
isc_throw(CCSessionInitError, strerror(errno));
}
@@ -284,7 +332,7 @@ ModuleCCSession::readModuleSpecification(const std::string& filename) {
LOG_ERROR(config_logger, CONFIG_JSON_PARSE).arg(filename).arg(pe.what());
isc_throw(CCSessionInitError, pe.what());
} catch (const ModuleSpecError& dde) {
- LOG_ERROR(config_logger, CONFIG_MODULE_SPEC).arg(filename).arg(dde.what());
+ LOG_ERROR(config_logger, CONFIG_MOD_SPEC_FORMAT).arg(filename).arg(dde.what());
isc_throw(CCSessionInitError, dde.what());
}
file.close();
@@ -334,7 +382,7 @@ ModuleCCSession::ModuleCCSession(
int rcode;
ConstElementPtr err = parseAnswer(rcode, answer);
if (rcode != 0) {
- LOG_ERROR(config_logger, CONFIG_MANAGER_MOD_SPEC).arg(answer->str());
+ LOG_ERROR(config_logger, CONFIG_MOD_SPEC_REJECT).arg(answer->str());
isc_throw(CCSessionInitError, answer->str());
}
@@ -348,7 +396,7 @@ ModuleCCSession::ModuleCCSession(
if (rcode == 0) {
handleConfigUpdate(new_config);
} else {
- LOG_ERROR(config_logger, CONFIG_MANAGER_CONFIG).arg(new_config->str());
+ LOG_ERROR(config_logger, CONFIG_GET_FAIL).arg(new_config->str());
isc_throw(CCSessionInitError, answer->str());
}
}
diff --git a/src/lib/config/ccsession.h b/src/lib/config/ccsession.h
index 0d4b7f3..7dc34ba 100644
--- a/src/lib/config/ccsession.h
+++ b/src/lib/config/ccsession.h
@@ -373,8 +373,41 @@ default_logconfig_handler(const std::string& module_name,
isc::data::ConstElementPtr new_config,
const ConfigData& config_data);
-}
-}
+
+/// \brief Returns the loggers related to this module
+///
+/// This function does two things:
+/// - it drops the configuration parts for loggers for other modules
+/// - it replaces the '*' in the name of the loggers by the name of
+/// this module, but *only* if the expanded name is not configured
+/// explicitly
+///
+/// Examples: if this is the module b10-resolver,
+/// For the config names ['*', 'b10-auth']
+/// The '*' is replaced with 'b10-resolver', and this logger is used.
+/// 'b10-auth' is ignored (of course, it would not be ignored in the
+/// b10-auth module itself).
+///
+/// For ['*', 'b10-resolver']
+/// The '*' is ignored, and only 'b10-resolver' is used.
+///
+/// For ['*.reslib', 'b10-resolver']
+/// Or ['b10-resolver.reslib', '*']
+/// Both are used, where the * will be expanded to b10-resolver
+///
+/// \note This is a public function at this time, but mostly for
+/// the purposes of testing. Once we can directly test what loggers
+/// are running, this function may be moved to the unnamed namespace
+///
+/// \param loggers the original 'loggers' config list
+/// \return ListElement containing only loggers relevant for this
+/// module, where * is replaced by the root logger name
+isc::data::ConstElementPtr
+getRelatedLoggers(isc::data::ConstElementPtr loggers);
+
+} // namespace config
+
+} // namespace isc
#endif // __CCSESSION_H
// Local Variables:
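
For illustration only, roughly what the ccsession unit tests below exercise: feeding a 'loggers' list to getRelatedLoggers() so that foreign entries are dropped and '*' is expanded to the root logger name. The root name depends on the runtime environment; the b10-resolver value from the doc comment above is assumed here, and the function name is made up for the sketch.

#include <cc/data.h>
#include <config/ccsession.h>

void
relatedLoggersExample() {
    using namespace isc::data;
    ConstElementPtr loggers = Element::fromJSON(
        "[{\"name\": \"*\"}, {\"name\": \"b10-auth\"}]");
    // With the root logger name "b10-resolver", the result would be
    // [{"name": "b10-resolver"}]: the '*' entry is expanded and the
    // unrelated 'b10-auth' entry is dropped.
    ConstElementPtr related = isc::config::getRelatedLoggers(loggers);
    (void)related;
}
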
diff --git a/src/lib/config/config_log.h b/src/lib/config/config_log.h
index 22e5a5c..0063855 100644
--- a/src/lib/config/config_log.h
+++ b/src/lib/config/config_log.h
@@ -16,7 +16,7 @@
#define __CONFIG_LOG__H
#include <log/macros.h>
-#include "configdef.h"
+#include "config_messages.h"
namespace isc {
namespace config {
diff --git a/src/lib/config/config_messages.mes b/src/lib/config/config_messages.mes
new file mode 100644
index 0000000..660ab9a
--- /dev/null
+++ b/src/lib/config/config_messages.mes
@@ -0,0 +1,59 @@
+# Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+$NAMESPACE isc::config
+
+% CONFIG_CCSESSION_MSG error in CC session message: %1
+There was a problem with an incoming message on the command and control
+channel. The message does not appear to be a valid command: it is
+missing a required element or contains an unknown data format. This
+most likely means that another BIND10 module is sending a bad message.
+The message itself is ignored by this module.
+
+% CONFIG_CCSESSION_MSG_INTERNAL error handling CC session message: %1
+There was an internal problem handling an incoming message on the command
+and control channel. An unexpected exception was thrown, details of
+which are appended to the message. The module will continue to run,
+but will not send back an answer.
+
+The most likely cause of this error is a programming error. Please raise
+a bug report.
+
+% CONFIG_GET_FAIL error getting configuration from cfgmgr: %1
+The configuration manager returned an error when this module requested
+the configuration. The full error message answer from the configuration
+manager is appended to the log error. The most likely cause is that
+the module is of a different (command specification) version than the
+running configuration manager.
+
+% CONFIG_JSON_PARSE JSON parse error in %1: %2
+There was an error parsing the JSON file. The given file does not appear
+to be in valid JSON format. Please verify that the filename is correct
+and that the contents are valid JSON.
+
+% CONFIG_MOD_SPEC_FORMAT module specification error in %1: %2
+The given file does not appear to be a valid specification file: details
+are included in the message. Please verify that the filename is correct
+and that its contents are a valid BIND10 module specification.
+
+% CONFIG_MOD_SPEC_REJECT module specification rejected by cfgmgr: %1
+The specification file for this module was rejected by the configuration
+manager. The full error message answer from the configuration manager is
+appended to the log error. The most likely cause is that the module is of
+a different (specification file) version than the running configuration
+manager.
+
+% CONFIG_OPEN_FAIL error opening %1: %2
+There was an error opening the given file. The reason for the failure
+is included in the message.
diff --git a/src/lib/config/configdef.mes b/src/lib/config/configdef.mes
deleted file mode 100644
index be39073..0000000
--- a/src/lib/config/configdef.mes
+++ /dev/null
@@ -1,57 +0,0 @@
-# Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
-#
-# Permission to use, copy, modify, and/or distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
-# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
-# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
-# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
-# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
-# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
-# PERFORMANCE OF THIS SOFTWARE.
-
-$PREFIX CONFIG_
-$NAMESPACE isc::config
-
-% FOPEN_ERR error opening %1: %2
-There was an error opening the given file.
-
-% JSON_PARSE JSON parse error in %1: %2
-There was a parse error in the JSON file. The given file does not appear
-to be in valid JSON format. Please verify that the filename is correct
-and that the contents are valid JSON.
-
-% MODULE_SPEC module specification error in %1: %2
-The given file does not appear to be a valid specification file. Please
-verify that the filename is correct and that its contents are a valid
-BIND10 module specification.
-
-% MANAGER_MOD_SPEC module specification not accepted by cfgmgr: %1
-The module specification file for this module was rejected by the
-configuration manager. The full error message answer from the
-configuration manager is appended to the log error. The most likely
-cause is that the module is of a different (specification file) version
-than the running configuration manager.
-
-% MANAGER_CONFIG error getting configuration from cfgmgr: %1
-The configuration manager returned an error when this module requested
-the configuration. The full error message answer from the configuration
-manager is appended to the log error. The most likely cause is that
-the module is of a different (command specification) version than the
-running configuration manager.
-
-% CCSESSION_MSG error in CC session message: %1
-There was a problem with an incoming message on the command and control
-channel. The message does not appear to be a valid command, and is
-missing a required element or contains an unknown data format. This
-most likely means that another BIND10 module is sending a bad message.
-The message itself is ignored by this module.
-
-% CCSESSION_MSG_INTERNAL error handling CC session message: %1
-There was an internal problem handling an incoming message on the
-command and control channel. An unexpected exception was thrown. This
-most likely points to an internal inconsistency in the module code. The
-exception message is appended to the log error, and the module will
-continue to run, but will not send back an answer.
diff --git a/src/lib/config/tests/ccsession_unittests.cc b/src/lib/config/tests/ccsession_unittests.cc
index e5fe049..e1a4f9d 100644
--- a/src/lib/config/tests/ccsession_unittests.cc
+++ b/src/lib/config/tests/ccsession_unittests.cc
@@ -24,6 +24,8 @@
#include <config/tests/data_def_unittests_config.h>
+#include <log/logger_name.h>
+
using namespace isc::data;
using namespace isc::config;
using namespace isc::cc;
@@ -632,4 +634,64 @@ TEST_F(CCSessionTest, doubleStartWithAddRemoteConfig) {
EXPECT_THROW(mccs.addRemoteConfig(ccspecfile("spec2.spec")),
FakeSession::DoubleRead);
}
+
+namespace {
+void doRelatedLoggersTest(const char* input, const char* expected) {
+ ConstElementPtr all_conf = isc::data::Element::fromJSON(input);
+ ConstElementPtr expected_conf = isc::data::Element::fromJSON(expected);
+ EXPECT_EQ(*expected_conf, *isc::config::getRelatedLoggers(all_conf));
+}
+} // end anonymous namespace
+
+TEST(LogConfigTest, relatedLoggersTest) {
+ // make sure logger configs for 'other' programs are ignored,
+ // and that * is substituted correctly
+ // The default root logger name is "bind10"
+ doRelatedLoggersTest("[{ \"name\": \"other_module\" }]",
+ "[]");
+ doRelatedLoggersTest("[{ \"name\": \"other_module.somelib\" }]",
+ "[]");
+ doRelatedLoggersTest("[{ \"name\": \"bind10_other\" }]",
+ "[]");
+ doRelatedLoggersTest("[{ \"name\": \"bind10_other.somelib\" }]",
+ "[]");
+ doRelatedLoggersTest("[ { \"name\": \"other_module\" },"
+ " { \"name\": \"bind10\" }]",
+ "[ { \"name\": \"bind10\" } ]");
+ doRelatedLoggersTest("[ { \"name\": \"bind10\" }]",
+ "[ { \"name\": \"bind10\" } ]");
+ doRelatedLoggersTest("[ { \"name\": \"bind10.somelib\" }]",
+ "[ { \"name\": \"bind10.somelib\" } ]");
+ doRelatedLoggersTest("[ { \"name\": \"other_module.somelib\" },"
+ " { \"name\": \"bind10.somelib\" }]",
+ "[ { \"name\": \"bind10.somelib\" } ]");
+ doRelatedLoggersTest("[ { \"name\": \"other_module.somelib\" },"
+ " { \"name\": \"bind10\" },"
+ " { \"name\": \"bind10.somelib\" }]",
+ "[ { \"name\": \"bind10\" },"
+ " { \"name\": \"bind10.somelib\" } ]");
+ doRelatedLoggersTest("[ { \"name\": \"*\" }]",
+ "[ { \"name\": \"bind10\" } ]");
+ doRelatedLoggersTest("[ { \"name\": \"*.somelib\" }]",
+ "[ { \"name\": \"bind10.somelib\" } ]");
+ doRelatedLoggersTest("[ { \"name\": \"*\", \"severity\": \"DEBUG\" },"
+ " { \"name\": \"bind10\", \"severity\": \"WARN\"}]",
+ "[ { \"name\": \"bind10\", \"severity\": \"WARN\"} ]");
+ doRelatedLoggersTest("[ { \"name\": \"*\", \"severity\": \"DEBUG\" },"
+ " { \"name\": \"some_module\", \"severity\": \"WARN\"}]",
+ "[ { \"name\": \"bind10\", \"severity\": \"DEBUG\"} ]");
+
+ // make sure 'bad' things like '*foo.x' or '*lib' are ignored
+ // (cfgmgr should have already caught it in the logconfig plugin
+ // check, and is responsible for reporting the error)
+ doRelatedLoggersTest("[ { \"name\": \"*foo\" }]",
+ "[ ]");
+ doRelatedLoggersTest("[ { \"name\": \"*foo.bar\" }]",
+ "[ ]");
+ doRelatedLoggersTest("[ { \"name\": \"*foo\" },"
+ " { \"name\": \"*foo.lib\" },"
+ " { \"name\": \"bind10\" } ]",
+ "[ { \"name\": \"bind10\" } ]");
+}
+
}
diff --git a/src/lib/datasrc/Makefile.am b/src/lib/datasrc/Makefile.am
index e028186..457d5b0 100644
--- a/src/lib/datasrc/Makefile.am
+++ b/src/lib/datasrc/Makefile.am
@@ -7,7 +7,7 @@ AM_CPPFLAGS += $(SQLITE_CFLAGS)
AM_CXXFLAGS = $(B10_CXXFLAGS)
-CLEANFILES = *.gcno *.gcda messagedef.h messagedef.cc
+CLEANFILES = *.gcno *.gcda datasrc_messages.h datasrc_messages.cc
lib_LTLIBRARIES = libdatasrc.la
libdatasrc_la_SOURCES = data_source.h data_source.cc
@@ -21,15 +21,15 @@ libdatasrc_la_SOURCES += memory_datasrc.h memory_datasrc.cc
libdatasrc_la_SOURCES += zone.h
libdatasrc_la_SOURCES += result.h
libdatasrc_la_SOURCES += logger.h logger.cc
-nodist_libdatasrc_la_SOURCES = messagedef.h messagedef.cc
+nodist_libdatasrc_la_SOURCES = datasrc_messages.h datasrc_messages.cc
libdatasrc_la_LIBADD = $(top_builddir)/src/lib/exceptions/libexceptions.la
libdatasrc_la_LIBADD += $(top_builddir)/src/lib/dns/libdns++.la
libdatasrc_la_LIBADD += $(top_builddir)/src/lib/log/liblog.la
libdatasrc_la_LIBADD += $(top_builddir)/src/lib/cc/libcc.la
-BUILT_SOURCES = messagedef.h messagedef.cc
-messagedef.h messagedef.cc: Makefile messagedef.mes
- $(top_builddir)/src/lib/log/compiler/message $(top_srcdir)/src/lib/datasrc/messagedef.mes
+BUILT_SOURCES = datasrc_messages.h datasrc_messages.cc
+datasrc_messages.h datasrc_messages.cc: Makefile datasrc_messages.mes
+ $(top_builddir)/src/lib/log/compiler/message $(top_srcdir)/src/lib/datasrc/datasrc_messages.mes
-EXTRA_DIST = messagedef.mes
+EXTRA_DIST = datasrc_messages.mes
diff --git a/src/lib/datasrc/datasrc_messages.mes b/src/lib/datasrc/datasrc_messages.mes
new file mode 100644
index 0000000..c692364
--- /dev/null
+++ b/src/lib/datasrc/datasrc_messages.mes
@@ -0,0 +1,493 @@
+# Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+$NAMESPACE isc::datasrc
+
+# \brief Messages for the data source library
+
+% DATASRC_CACHE_CREATE creating the hotspot cache
+Debug information that the hotspot cache was created at startup.
+
+% DATASRC_CACHE_DESTROY destroying the hotspot cache
+Debug information. The hotspot cache is being destroyed.
+
+% DATASRC_CACHE_DISABLE disabling the cache
+The hotspot cache is disabled from now on. It is not going to store
+information or return anything.
+
+% DATASRC_CACHE_ENABLE enabling the cache
+The hotspot cache is enabled from now on.
+
+% DATASRC_CACHE_EXPIRED the item '%1' is expired
+Debug information. There was an attempt to look up an item in the hotspot
+cache. And the item was actually there, but it was too old, so it was removed
+instead and nothing is reported (the external behaviour is the same as with
+CACHE_NOT_FOUND).
+
+% DATASRC_CACHE_FOUND the item '%1' was found
+Debug information. An item was successfully looked up in the hotspot cache.
+
+% DATASRC_CACHE_FULL cache is full, dropping oldest
+Debug information. After inserting an item into the hotspot cache, the
+maximum number of items was exceeded, so the least recently used item will
+be dropped. This should be directly followed by CACHE_REMOVE.
+
+% DATASRC_CACHE_INSERT inserting item '%1' into the cache
+Debug information. It means a new item is being inserted into the hotspot
+cache.
+
+% DATASRC_CACHE_NOT_FOUND the item '%1' was not found
+Debug information. An attempt was made to look up an item in the hotspot cache,
+but it is not there.
+
+% DATASRC_CACHE_OLD_FOUND older instance of cache item found, replacing
+Debug information. While inserting an item into the hotspot cache, an older
+instance of an item with the same name was found. The old instance will be
+removed. This should be directly followed by CACHE_REMOVE.
+
+% DATASRC_CACHE_REMOVE removing '%1' from the cache
+Debug information. An item is being removed from the hotspot cache.
+
+% DATASRC_CACHE_SLOTS setting the cache size to '%1', dropping '%2' items
+The maximum allowed number of items of the hotspot cache is set to the given
+number. If there are too many, some of them will be dropped. The size of 0
+means no limit.
+
+% DATASRC_DO_QUERY handling query for '%1/%2'
+Debug information. We're processing some internal query for the given name and
+type.
+
+% DATASRC_MEM_ADD_RRSET adding RRset '%1/%2' into zone '%3'
+Debug information. An RRset is being added to the in-memory data source.
+
+% DATASRC_MEM_ADD_WILDCARD adding wildcards for '%1'
+Debug information. Some special marks above each * in the wildcard name are
+needed. They are being added now for this name.
+
+% DATASRC_MEM_ADD_ZONE adding zone '%1/%2'
+Debug information. A zone is being added into the in-memory data source.
+
+% DATASRC_MEM_ANY_SUCCESS ANY query for '%1' successful
+Debug information. The domain was found and an ANY type query is being answered
+by providing everything found inside the domain.
+
+% DATASRC_MEM_CNAME CNAME at the domain '%1'
+Debug information. The requested domain is an alias to a different domain,
+returning the CNAME instead.
+
+% DATASRC_MEM_CNAME_COEXIST can't add data to CNAME in domain '%1'
+This is the same problem as in MEM_CNAME_TO_NONEMPTY, but it happened the
+other way around -- adding some other data to CNAME.
+
+% DATASRC_MEM_CNAME_TO_NONEMPTY can't add CNAME to domain with other data in '%1'
+Someone or something tried to add a CNAME into a domain that already contains
+some other data. But the protocol forbids coexistence of CNAME with anything
+(RFC 1034, section 3.6.2). This indicates a problem with provided data.
+
+% DATASRC_MEM_CREATE creating zone '%1' in '%2' class
+Debug information. A representation of a zone for the in-memory data source is
+being created.
+
+% DATASRC_MEM_DELEG_FOUND delegation found at '%1'
+Debug information. A delegation point was found above the requested record.
+
+% DATASRC_MEM_DESTROY destroying zone '%1' in '%2' class
+Debug information. A zone from in-memory data source is being destroyed.
+
+% DATASRC_MEM_DNAME_ENCOUNTERED encountered a DNAME
+Debug information. While searching for the requested domain, a DNAME was
+encountered on the way. This may lead to redirection to a different domain and
+stop the search.
+
+% DATASRC_MEM_DNAME_FOUND DNAME found at '%1'
+Debug information. A DNAME was found instead of the requested information.
+
+% DATASRC_MEM_DNAME_NS DNAME and NS can't coexist in non-apex domain '%1'
+A request was made to put both DNAME and NS records into a domain
+which is not the apex (the top of the zone). This is forbidden by RFC
+2672, section 3. This indicates a problem with provided data.
+
+% DATASRC_MEM_DOMAIN_EMPTY requested domain '%1' is empty
+Debug information. The requested domain exists in the tree of domains, but
+it is empty. Therefore it doesn't contain the requested resource type.
+
+% DATASRC_MEM_DUP_RRSET duplicate RRset '%1/%2'
+An RRset is being inserted into in-memory data source for a second time. The
+original version must be removed first. Note that loading master files where an
+RRset is split into multiple locations is not supported yet.
+
+% DATASRC_MEM_EXACT_DELEGATION delegation at the exact domain '%1'
+Debug information. There's a NS record at the requested domain. This means
+this zone is not authoritative for the requested domain, but a delegation
+should be followed. The requested domain is an apex of some zone.
+
+% DATASRC_MEM_FIND find '%1/%2'
+Debug information. A search for the requested RRset is being started.
+
+% DATASRC_MEM_FIND_ZONE looking for zone '%1'
+Debug information. A zone object for this zone is being searched for in the
+in-memory data source.
+
+% DATASRC_MEM_LOAD loading zone '%1' from file '%2'
+Debug information. The content of the master file is being loaded into memory.
+
+% DATASRC_MEM_NOTFOUND requested domain '%1' not found
+Debug information. The requested domain does not exist.
+
+% DATASRC_MEM_NS_ENCOUNTERED encountered a NS
+Debug information. While searching for the requested domain, an NS record was
+encountered on the way (a delegation). This may cause the search to stop.
+
+% DATASRC_MEM_NXRRSET no such type '%1' at '%2'
+Debug information. The domain exists, but it doesn't hold any record of the
+requested type.
+
+% DATASRC_MEM_OUT_OF_ZONE domain '%1' doesn't belong to zone '%2'
+An attempt was made to add the domain into a zone that shouldn't have it
+(e.g. the domain is not a subdomain of the zone origin). This indicates a
+problem with provided data.
+
+% DATASRC_MEM_RENAME renaming RRset from '%1' to '%2'
+Debug information. An RRset is being generated from a different RRset (most
+probably a wildcard), so it must be renamed to whatever the user asked for. In
+fact, it's impossible to rename RRsets with our libraries, so a new one is
+created and all resource records are copied over.
+
+% DATASRC_MEM_SINGLETON trying to add multiple RRs for domain '%1' and type '%2'
+Some resource types are singletons -- only one is allowed in a domain
+(for example CNAME or SOA). This indicates a problem with provided data.
+
+% DATASRC_MEM_SUCCESS query for '%1/%2' successful
+Debug information. The requested record was found.
+
+% DATASRC_MEM_SUPER_STOP stopped at superdomain '%1', domain '%2' is empty
+Debug information. The search stopped at a superdomain of the requested
+domain. The domain is an empty nonterminal, therefore it is treated as the
+NXRRSET case (e.g. the domain exists, but it doesn't have the requested record
+type).
+
+% DATASRC_MEM_SWAP swapping contents of two zone representations ('%1' and '%2')
+Debug information. The contents of two in-memory zones are being exchanged.
+This is the usual practice for doing some manipulation in an exception-safe
+manner -- the new data are prepared in a separate zone object and, when that
+works, the contents are swapped. The original object then holds the new data
+and the other one, now holding the old data, can be safely destroyed.
+
+% DATASRC_MEM_WILDCARD_CANCEL wildcard match canceled for '%1'
+Debug information. A domain above the wildcard was reached, but there's
+something below the requested domain. Therefore the wildcard doesn't apply
+here. This behaviour is specified by RFC 1034, section 4.3.3.
+
+% DATASRC_MEM_WILDCARD_DNAME DNAME record in wildcard domain '%1'
+The software refuses to load DNAME records into a wildcard domain. It isn't
+explicitly forbidden, but the protocol is ambiguous about how this should
+behave and BIND 9 refuses that as well. Please describe your intention using
+different tools.
+
+% DATASRC_MEM_WILDCARD_NS NS record in wildcard domain '%1'
+The software refuses to load NS records into a wildcard domain. It isn't
+explicitly forbidden, but the protocol is ambiguous about how this should
+behave and BIND 9 refuses that as well. Please describe your intention using
+different tools.
+
+% DATASRC_META_ADD adding a data source into meta data source
+Debug information. Yet another data source is being added into the meta data
+source (probably at startup or during reconfiguration).
+
+% DATASRC_META_ADD_CLASS_MISMATCH mismatch between classes '%1' and '%2'
+An attempt was made to add a data source into a meta data source, but their
+classes do not match.
+
+% DATASRC_META_REMOVE removing data source from meta data source
+Debug information. A data source is being removed from the meta data source.
+
+% DATASRC_QUERY_ADD_NSEC adding NSEC record for '%1'
+Debug information. A NSEC record covering this zone is being added.
+
+% DATASRC_QUERY_ADD_NSEC3 adding NSEC3 record of zone '%1'
+Debug information. A NSEC3 record for the given zone is being added to the
+response message.
+
+% DATASRC_QUERY_ADD_RRSET adding RRset '%1/%2' to message
+Debug information. An RRset is being added to the response message.
+
+% DATASRC_QUERY_ADD_SOA adding SOA of '%1'
+Debug information. A SOA record of the given zone is being added to the
+authority section of the response message.
+
+% DATASRC_QUERY_AUTH_FAIL the underlying data source failed with %1
+The underlying data source failed to answer the authoritative query. 1 means
+some error, 2 is not implemented. The data source should have logged the
+specific error already.
+
+% DATASRC_QUERY_BAD_REFERRAL bad referral to '%1'
+The domain lives in another zone. But it is not possible to generate referral
+information for it.
+
+% DATASRC_QUERY_CACHED data for %1/%2 found in cache
+Debug information. The requested data were found in the hotspot cache, so
+no query is sent to the real data source.
+
+% DATASRC_QUERY_CHECK_CACHE checking cache for '%1/%2'
+Debug information. While processing a query, lookup to the hotspot cache
+is being made.
+
+% DATASRC_QUERY_COPY_AUTH copying authoritative section into message
+Debug information. The whole referral information is being copied into the
+response message.
+
+% DATASRC_QUERY_DELEGATION looking for delegation on the path to '%1'
+Debug information. The software is trying to identify delegation points on the
+way down to the given domain.
+
+% DATASRC_QUERY_EMPTY_CNAME CNAME at '%1' is empty
+There was a CNAME and it was being followed, but it contains no records,
+so there's nowhere to go. There will be no answer. This indicates a problem
+with the supplied data.
+
+% DATASRC_QUERY_EMPTY_DNAME the DNAME on '%1' is empty
+During an attempt to synthesize a CNAME from this DNAME, it was discovered that
+the DNAME is empty (it has no records). This indicates a problem with the
+supplied data.
+
+% DATASRC_QUERY_FAIL query failed
+Some subtask of query processing failed. The reason should have been reported
+already. We are returning SERVFAIL.
+
+% DATASRC_QUERY_FOLLOW_CNAME following CNAME at '%1'
+Debug information. The domain is a CNAME (or a DNAME and we created a CNAME
+for it already), so it's being followed.
+
+% DATASRC_QUERY_GET_MX_ADDITIONAL addition of A/AAAA for '%1' requested by MX '%2'
+Debug information. While processing a query, an MX record was encountered. It
+references the mentioned address, so A/AAAA records for it are looked up
+and put into the additional section.
+
+% DATASRC_QUERY_GET_NS_ADDITIONAL addition of A/AAAA for '%1' requested by NS '%2'
+Debug information. While processing a query, an NS record was encountered. It
+references the mentioned address, so A/AAAA records for it are looked up
+and put into the additional section.
+
+% DATASRC_QUERY_GLUE_FAIL the underlying data source failed with %1
+The underlying data source failed to answer the glue query. 1 means some error,
+2 is not implemented. The data source should have logged the specific error
+already.
+
+% DATASRC_QUERY_INVALID_OP invalid query operation requested
+This indicates a programmer error. DO_QUERY was called with an unknown
+operation code.
+
+% DATASRC_QUERY_IS_AUTH auth query (%1/%2)
+Debug information. The last DO_QUERY is an auth query.
+
+% DATASRC_QUERY_IS_GLUE glue query (%1/%2)
+Debug information. The last DO_QUERY is query for glue addresses.
+
+% DATASRC_QUERY_IS_NOGLUE query for non-glue addresses (%1/%2)
+Debug information. The last DO_QUERY is query for addresses that are not
+glue.
+
+% DATASRC_QUERY_IS_REF query for referral (%1/%2)
+Debug information. The last DO_QUERY is query for referral information.
+
+% DATASRC_QUERY_IS_SIMPLE simple query (%1/%2)
+Debug information. The last DO_QUERY is a simple query.
+
+% DATASRC_QUERY_MISPLACED_TASK task of this type should not be here
+This indicates a programming error. A task was found in the internal task
+queue, but this kind of task wasn't designed to be inside the queue (it should
+be handled right away, not queued).
+
+% DATASRC_QUERY_MISSING_NS missing NS records for '%1'
+NS records should have been put into the authority section. However, this zone
+has none. This indicates a problem with the provided data.
+
+% DATASRC_QUERY_MISSING_SOA the zone '%1' has no SOA
+The answer should have been a negative one (e.g. stating the nonexistence of
+something). To do so, a SOA record should be put into the authority section,
+but the zone does not have one. This indicates a problem with the provided
+data.
+
+% DATASRC_QUERY_NOGLUE_FAIL the underlying data source failed with %1
+The underlying data source failed to answer the no-glue query. 1 means some
+error, 2 is not implemented. The data source should have logged the specific
+error already.
+
+% DATASRC_QUERY_NO_CACHE_ANY_AUTH ignoring cache for ANY query (%1/%2 in %3 class)
+Debug information. The hotspot cache is ignored for authoritative ANY queries
+for consistency reasons.
+
+% DATASRC_QUERY_NO_CACHE_ANY_SIMPLE ignoring cache for ANY query (%1/%2 in %3 class)
+Debug information. The hotspot cache is ignored for ANY queries for consistency
+reasons.
+
+% DATASRC_QUERY_NO_DS_NSEC there's no DS record in the '%1' zone
+An attempt to add a NSEC record into the message failed, because the zone does
+not have any DS record. This indicates problem with the provided data.
+
+% DATASRC_QUERY_NO_DS_NSEC3 there's no DS record in the '%1' zone
+An attempt to add a NSEC3 record into the message failed, because the zone does
+not have any DS record. This indicates problem with the provided data.
+
+% DATASRC_QUERY_NO_ZONE no zone containing '%1' in class '%2'
+Lookup of the domain failed because the data source has no zone that contains
+the domain. Maybe someone sent a query to the wrong server for some reason.
+
+% DATASRC_QUERY_PROCESS processing query '%1/%2' in the '%3' class
+Debug information. A sure query is being processed now.
+
+% DATASRC_QUERY_PROVENX_FAIL unable to prove nonexistence of '%1'
+The user wants DNSSEC and we discovered that the entity doesn't exist (either
+the domain or the record), but there was an error getting an NSEC/NSEC3 record
+to prove the nonexistence.
+
+% DATASRC_QUERY_REF_FAIL the underlying data source failed with %1
+The underlying data source failed to answer the query for referral information.
+1 means some error, 2 is not implemented. The data source should have logged
+the specific error already.
+
+% DATASRC_QUERY_RRSIG unable to answer RRSIG query
+The server is unable to answer a direct query for RRSIG type, but was asked
+to do so.
+
+% DATASRC_QUERY_SIMPLE_FAIL the underlying data source failed with %1
+The underlying data source failed to answer the simple query. 1 means some
+error, 2 is not implemented. The data source should have logged the specific
+error already.
+
+% DATASRC_QUERY_SYNTH_CNAME synthesizing CNAME from DNAME on '%1'
+Debug information. While answering a query, a DNAME was met. The DNAME itself
+will be returned, but along with it a CNAME for clients which don't understand
+DNAMEs will be synthesized.
+
+% DATASRC_QUERY_TASK_FAIL task failed with %1
+The query subtask failed. The reason should have been reported by the subtask
+already. The code is 1 for error, 2 for not implemented.
+
+% DATASRC_QUERY_TOO_MANY_CNAMES CNAME chain limit exceeded at '%1'
+A CNAME led to another CNAME and it led to another, and so on. After 16
+CNAMEs, the software gave up. Long CNAME chains are discouraged, and this
+might possibly be a loop as well. Note that some of the CNAMEs might have
+been synthesized from DNAMEs. This indicates a problem with the supplied data.
+
+% DATASRC_QUERY_UNKNOWN_RESULT unknown result of subtask
+This indicates a programmer error. The answer from the subtask doesn't look like
+anything known.
+
+% DATASRC_QUERY_WILDCARD looking for a wildcard covering '%1'
+Debug information. A direct match wasn't found, so a wildcard covering the
+domain is being looked for now.
+
+% DATASRC_QUERY_WILDCARD_FAIL error processing wildcard for '%1'
+During an attempt to cover the domain by a wildcard an error happened. The
+exact kind was hopefully already reported.
+
+% DATASRC_QUERY_WILDCARD_PROVENX_FAIL unable to prove nonexistence of '%1' (%2)
+While processing a wildcard, it wasn't possible to prove nonexistence of the
+given domain or record. The code is 1 for error and 2 for not implemented.
+
+% DATASRC_QUERY_WILDCARD_REFERRAL unable to find referral info for '%1' (%2)
+While processing a wildcard, a referral was met. But it wasn't possible to get
+enough information for it. The code is 1 for error, 2 for not implemented.
+
+% DATASRC_SQLITE_CLOSE closing SQLite database
+Debug information. The SQLite data source is closing the database file.
+
+% DATASRC_SQLITE_CREATE SQLite data source created
+Debug information. An instance of the SQLite data source is being created.
+
+% DATASRC_SQLITE_DESTROY SQLite data source destroyed
+Debug information. An instance of the SQLite data source is being destroyed.
+
+% DATASRC_SQLITE_ENCLOSURE looking for zone containing '%1'
+Debug information. The SQLite data source is trying to identify which zone
+should hold this domain.
+
+% DATASRC_SQLITE_ENCLOSURE_NOTFOUND no zone contains it
+Debug information. The last SQLITE_ENCLOSURE query was unsuccessful; there's
+no such zone in our data.
+
+% DATASRC_SQLITE_FIND looking for RRset '%1/%2'
+Debug information. The SQLite data source is looking up a resource record
+set.
+
+% DATASRC_SQLITE_FINDADDRS looking for A/AAAA addresses for '%1'
+Debug information. The data source is looking up the addresses for the given
+domain name.
+
+% DATASRC_SQLITE_FINDADDRS_BAD_CLASS class mismatch looking for addresses ('%1' and '%2')
+The SQLite data source was looking up A/AAAA addresses, but the data source
+contains a different class than the query was for.
+
+% DATASRC_SQLITE_FINDEXACT looking for exact RRset '%1/%2'
+Debug information. The SQLite data source is looking up an exact resource
+record.
+
+% DATASRC_SQLITE_FINDEXACT_BAD_CLASS class mismatch looking for an RRset ('%1' and '%2')
+The SQLite data source was looking up an exact RRset, but the data source
+contains a different class than the query was for.
+
+% DATASRC_SQLITE_FINDREC looking for record '%1/%2'
+Debug information. The SQLite data source is looking up records of the given
+name and type in the database.
+
+% DATASRC_SQLITE_FINDREF looking for referral at '%1'
+Debug information. The SQLite data source is identifying if this domain is
+a referral and where it goes.
+
+% DATASRC_SQLITE_FINDREF_BAD_CLASS class mismatch looking for referral ('%1' and '%2')
+The SQLite data source was trying to identify if there's a referral. But
+it contains a different class than the query was for.
+
+% DATASRC_SQLITE_FIND_BAD_CLASS class mismatch looking for an RRset ('%1' and '%2')
+The SQLite data source was looking up an RRset, but the data source contains
+a different class than the query was for.
+
+% DATASRC_SQLITE_FIND_NSEC3 looking for NSEC3 in zone '%1' for hash '%2'
+Debug information. We're trying to look up a NSEC3 record in the SQLite data
+source.
+
+% DATASRC_SQLITE_FIND_NSEC3_NO_ZONE no such zone '%1'
+The SQLite data source was asked to provide an NSEC3 record for the given zone,
+but it doesn't contain that zone.
+
+% DATASRC_SQLITE_OPEN opening SQLite database '%1'
+Debug information. The SQLite data source is loading an SQLite database in
+the provided file.
+
+% DATASRC_SQLITE_PREVIOUS looking for name previous to '%1'
+Debug information. We're trying to look up the name preceding the supplied one.
+
+% DATASRC_SQLITE_PREVIOUS_NO_ZONE no zone containing '%1'
+The SQLite data source tried to identify the name preceding this one, but that
+name is not contained in any zone in the data source.
+
+% DATASRC_SQLITE_SETUP setting up SQLite database
+The database for the SQLite data source was found empty. It is assumed this is
+the first run and it is being initialized with the current schema. It will
+still contain no data, but it will be ready for use.
+
+% DATASRC_STATIC_BAD_CLASS static data source can handle CH only
+For some reason, someone asked the static data source a query that is not in
+the CH class.
+
+% DATASRC_STATIC_CREATE creating the static datasource
+Debug information. The static data source (the one holding stuff like
+version.bind) is being created.
+
+% DATASRC_STATIC_FIND looking for '%1/%2'
+Debug information. This resource record set is being looked up in the static
+data source.
+
+% DATASRC_UNEXPECTED_QUERY_STATE unexpected query state
+This indicates a programming error. An internal task of unknown type was
+generated.
+
diff --git a/src/lib/datasrc/logger.h b/src/lib/datasrc/logger.h
index 7c2828d..ac5d50b 100644
--- a/src/lib/datasrc/logger.h
+++ b/src/lib/datasrc/logger.h
@@ -16,7 +16,7 @@
#define __DATASRC_LOGGER_H
#include <log/macros.h>
-#include <datasrc/messagedef.h>
+#include <datasrc/datasrc_messages.h>
/// \file logger.h
/// \brief Data Source library global logger
diff --git a/src/lib/datasrc/messagedef.mes b/src/lib/datasrc/messagedef.mes
deleted file mode 100644
index dedd2ad..0000000
--- a/src/lib/datasrc/messagedef.mes
+++ /dev/null
@@ -1,494 +0,0 @@
-# Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
-#
-# Permission to use, copy, modify, and/or distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
-# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
-# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
-# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
-# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
-# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
-# PERFORMANCE OF THIS SOFTWARE.
-
-$PREFIX DATASRC_
-$NAMESPACE isc::datasrc
-
-# \brief Messages for the data source library
-
-% CACHE_CREATE creating the hotspot cache
-Debug information that the hotspot cache was created at startup.
-
-% CACHE_DESTROY destroying the hotspot cache
-Debug information. The hotspot cache is being destroyed.
-
-% CACHE_INSERT inserting item '%1' into the cache
-Debug information. It means a new item is being inserted into the hotspot
-cache.
-
-% CACHE_OLD_FOUND older instance of cache item found, replacing
-Debug information. While inserting an item into the hotspot cache, an older
-instance of an item with the same name was found. The old instance will be
-removed. This should be directly followed by CACHE_REMOVE.
-
-% CACHE_FULL cache is full, dropping oldest
-Debug information. After inserting an item into the hotspot cache, the
-maximum number of items was exceeded, so the least recently used item will
-be dropped. This should be directly followed by CACHE_REMOVE.
-
-% CACHE_REMOVE removing '%1' from the cache
-Debug information. An item is being removed from the hotspot cache.
-
-% CACHE_NOT_FOUND the item '%1' was not found
-Debug information. It was attempted to look up an item in the hotspot cache,
-but it is not there.
-
-% CACHE_FOUND the item '%1' was found
-Debug information. An item was successfully looked up in the hotspot cache.
-
-% CACHE_EXPIRED the item '%1' is expired
-Debug information. There was an attempt to look up an item in the hotspot
-cache. And the item was actually there, but it was too old, so it was removed
-instead and nothing is reported (the external behaviour is the same as with
-CACHE_NOT_FOUND).
-
-% CACHE_SLOTS setting the cache size to '%1', dropping '%2' items
-The maximum allowed number of items of the hotspot cache is set to the given
-number. If there are too many, some of them will be dropped. The size of 0
-means no limit.
-
-% CACHE_ENABLE enabling the cache
-The hotspot cache is enabled from now on.
-
-% CACHE_DISABLE disabling the cache
-The hotspot cache is disabled from now on. It is not going to store
-information or return anything.
-
-% QUERY_SYNTH_CNAME synthesizing CNAME from DNAME on '%1'
-Debug information. While answering a query, a DNAME was met. The DNAME itself
-will be returned, but along with it a CNAME for clients which don't understand
-DNAMEs will be synthesized.
-
-% QUERY_EMPTY_DNAME the DNAME on '%1' is empty
-During an attempt to synthesize CNAME from this DNAME it was discovered the
-DNAME is empty (it has no records). This indicates problem with supplied data.
-
-% QUERY_GET_NS_ADDITIONAL addition of A/AAAA for '%1' requested by NS '%2'
-Debug information. While processing a query, a NS record was met. It
-references the mentioned address, so A/AAAA records for it are looked up
-and put it into the additional section.
-
-% QUERY_GET_MX_ADDITIONAL addition of A/AAAA for '%1' requested by MX '%2'
-Debug information. While processing a query, a MX record was met. It
-references the mentioned address, so A/AAAA records for it are looked up
-and put it into the additional section.
-
-% QUERY_FOLLOW_CNAME following CNAME at '%1'
-Debug information. The domain is a CNAME (or a DNAME and we created a CNAME
-for it already), so it's being followed.
-
-% QUERY_EMPTY_CNAME CNAME at '%1' is empty
-There was an CNAME and it was being followed. But it contains no records,
-so there's nowhere to go. There will be no answer. This indicates a problem
-with supplied data.
-We tried to follow
-
-% QUERY_TOO_MANY_CNAMES CNAME chain limit exceeded at '%1'
-A CNAME led to another CNAME and it led to another, and so on. After 16
-CNAMEs, the software gave up. Long CNAME chains are discouraged, and this
-might possibly be a loop as well. Note that some of the CNAMEs might have
-been synthesized from DNAMEs. This indicates problem with supplied data.
-
-% QUERY_CHECK_CACHE checking cache for '%1/%2'
-Debug information. While processing a query, lookup to the hotspot cache
-is being made.
-
-% QUERY_NO_CACHE_ANY_SIMPLE ignoring cache for ANY query (%1/%2 in %3 class)
-Debug information. The hotspot cache is ignored for ANY queries for consistency
-reasons.
-
-% QUERY_NO_CACHE_ANY_AUTH ignoring cache for ANY query (%1/%2 in %3 class)
-Debug information. The hotspot cache is ignored for authoritative ANY queries
-for consistency reasons.
-
-% DO_QUERY handling query for '%1/%2'
-Debug information. We're processing some internal query for given name and
-type.
-
-% QUERY_NO_ZONE no zone containing '%1' in class '%2'
-Lookup of domain failed because the data have no zone that contain the
-domain. Maybe someone sent a query to the wrong server for some reason.
-
-% QUERY_CACHED data for %1/%2 found in cache
-Debug information. The requested data were found in the hotspot cache, so
-no query is sent to the real data source.
-
-% QUERY_IS_SIMPLE simple query (%1/%2)
-Debug information. The last DO_QUERY is a simple query.
-
-% QUERY_IS_AUTH auth query (%1/%2)
-Debug information. The last DO_QUERY is an auth query.
-
-% QUERY_IS_GLUE glue query (%1/%2)
-Debug information. The last DO_QUERY is query for glue addresses.
-
-% QUERY_IS_NOGLUE query for non-glue addresses (%1/%2)
-Debug information. The last DO_QUERY is query for addresses that are not
-glue.
-
-% QUERY_IS_REF query for referral (%1/%2)
-Debug information. The last DO_QUERY is query for referral information.
-
-% QUERY_SIMPLE_FAIL the underlying data source failed with %1
-The underlying data source failed to answer the simple query. 1 means some
-error, 2 is not implemented. The data source should have logged the specific
-error already.
-
-% QUERY_AUTH_FAIL the underlying data source failed with %1
-The underlying data source failed to answer the authoritative query. 1 means
-some error, 2 is not implemented. The data source should have logged the
-specific error already.
-
-% QUERY_GLUE_FAIL the underlying data source failed with %1
-The underlying data source failed to answer the glue query. 1 means some error,
-2 is not implemented. The data source should have logged the specific error
-already.
-
-% QUERY_NOGLUE_FAIL the underlying data source failed with %1
-The underlying data source failed to answer the no-glue query. 1 means some
-error, 2 is not implemented. The data source should have logged the specific
-error already.
-
-% QUERY_REF_FAIL the underlying data source failed with %1
-The underlying data source failed to answer the query for referral information.
-1 means some error, 2 is not implemented. The data source should have logged
-the specific error already.
-
-% QUERY_INVALID_OP invalid query operation requested
-This indicates a programmer error. The DO_QUERY was called with unknown
-operation code.
-
-% QUERY_ADD_RRSET adding RRset '%1/%2' to message
-Debug information. An RRset is being added to the response message.
-
-% QUERY_COPY_AUTH copying authoritative section into message
-Debug information. The whole referral information is being copied into the
-response message.
-
-% QUERY_DELEGATION looking for delegation on the path to '%1'
-Debug information. The software is trying to identify delegation points on the
-way down to the given domain.
-
-% QUERY_ADD_SOA adding SOA of '%1'
-Debug information. A SOA record of the given zone is being added to the
-authority section of the response message.
-
-% QUERY_ADD_NSEC adding NSEC record for '%1'
-Debug information. A NSEC record covering this zone is being added.
-
-% QUERY_ADD_NSEC3 adding NSEC3 record of zone '%1'
-Debug information. A NSEC3 record for the given zone is being added to the
-response message.
-
-% QUERY_NO_DS_NSEC3 there's no DS record in the '%1' zone
-An attempt to add a NSEC3 record into the message failed, because the zone does
-not have any DS record. This indicates problem with the provided data.
-
-% QUERY_NO_DS_NSEC there's no DS record in the '%1' zone
-An attempt to add a NSEC record into the message failed, because the zone does
-not have any DS record. This indicates problem with the provided data.
-
-% QUERY_WILDCARD looking for a wildcard covering '%1'
-Debug information. A direct match wasn't found, so a wildcard covering the
-domain is being looked for now.
-
-% QUERY_WILDCARD_PROVENX_FAIL unable to prove nonexistence of '%1' (%2)
-While processing a wildcard, it wasn't possible to prove nonexistence of the
-given domain or record. The code is 1 for error and 2 for not implemented.
-
-% QUERY_WILDCARD_REFERRAL unable to find referral info for '%1' (%2)
-While processing a wildcard, a referral was met. But it wasn't possible to get
-enough information for it. The code is 1 for error, 2 for not implemented.
-
-% QUERY_PROCESS processing query '%1/%2' in the '%3' class
-Debug information. A sure query is being processed now.
-
-% QUERY_RRSIG unable to answer RRSIG query
-The server is unable to answer a direct query for RRSIG type, but was asked
-to do so.
-
-% QUERY_MISPLACED_TASK task of this type should not be here
-This indicates a programming error. A task was found in the internal task
-queue, but this kind of task wasn't designed to be inside the queue (it should
-be handled right away, not queued).
-
-% QUERY_TASK_FAIL task failed with %1
-The query subtask failed. The reason should have been reported by the subtask
-already. The code is 1 for error, 2 for not implemented.
-
-% QUERY_MISSING_NS missing NS records for '%1'
-NS records should have been put into the authority section. However, this zone
-has none. This indicates problem with provided data.
-
-% UNEXPECTED_QUERY_STATE unexpected query state
-This indicates a programming error. An internal task of unknown type was
-generated.
-
-% QUERY_FAIL query failed
-Some subtask of query processing failed. The reason should have been reported
-already. We are returning SERVFAIL.
-
-% QUERY_BAD_REFERRAL bad referral to '%1'
-The domain lives in another zone. But it is not possible to generate referral
-information for it.
-
-% QUERY_WILDCARD_FAIL error processing wildcard for '%1'
-During an attempt to cover the domain by a wildcard an error happened. The
-exact kind was hopefully already reported.
-
-% QUERY_MISSING_SOA the zone '%1' has no SOA
-The answer should have been a negative one (eg. of nonexistence of something).
-To do so, a SOA record should be put into the authority section, but the zone
-does not have one. This indicates problem with provided data.
-
-% QUERY_PROVENX_FAIL unable to prove nonexistence of '%1'
-The user wants DNSSEC and we discovered the entity doesn't exist (either
-domain or the record). But there was an error getting NSEC/NSEC3 record
-to prove the nonexistence.
-
-% QUERY_UNKNOWN_RESULT unknown result of subtask
-This indicates a programmer error. The answer of subtask doesn't look like
-anything known.
-
-% META_ADD adding a data source into meta data source
-Debug information. Yet another data source is being added into the meta data
-source. (probably at startup or reconfiguration)
-
-% META_ADD_CLASS_MISMATCH mismatch between classes '%1' and '%2'
-It was attempted to add a data source into a meta data source. But their
-classes do not match.
-
-% META_REMOVE removing data source from meta data source
-Debug information. A data source is being removed from meta data source.
-
-% MEM_ADD_WILDCARD adding wildcards for '%1'
-Debug information. Some special marks above each * in wildcard name are needed.
-They are being added now for this name.
-
-% MEM_CNAME_TO_NONEMPTY can't add CNAME to domain with other data in '%1'
-Someone or something tried to add a CNAME into a domain that already contains
-some other data. But the protocol forbids coexistence of CNAME with anything
-(RFC 1034, section 3.6.2). This indicates a problem with provided data.
-
-% MEM_CNAME_COEXIST can't add data to CNAME in domain '%1'
-This is the same problem as in MEM_CNAME_TO_NONEMPTY, but it happened the
-other way around -- adding some other data to CNAME.
-
-% MEM_DNAME_NS DNAME and NS can't coexist in non-apex domain '%1'
-It was requested for DNAME and NS records to be put into the same domain
-which is not the apex (the top of the zone). This is forbidden by RFC
-2672, section 3. This indicates a problem with provided data.
-
-% MEM_SINGLETON trying to add multiple RRs for domain '%1' and type '%2'
-Some resource types are singletons -- only one is allowed in a domain
-(for example CNAME or SOA). This indicates a problem with provided data.
-
-% MEM_OUT_OF_ZONE domain '%1' doesn't belong to zone '%2'
-It was attempted to add the domain into a zone that shouldn't have it
-(eg. the domain is not subdomain of the zone origin). This indicates a
-problem with provided data.
-
-% MEM_WILDCARD_NS NS record in wildcard domain '%1'
-The software refuses to load NS records into a wildcard domain. It isn't
-explicitly forbidden, but the protocol is ambiguous about how this should
-behave and BIND 9 refuses that as well. Please describe your intention using
-different tools.
-
-% MEM_WILDCARD_DNAME DNAME record in wildcard domain '%1'
-The software refuses to load DNAME records into a wildcard domain. It isn't
-explicitly forbidden, but the protocol is ambiguous about how this should
-behave and BIND 9 refuses that as well. Please describe your intention using
-different tools.
-
-% MEM_ADD_RRSET adding RRset '%1/%2' into zone '%3'
-Debug information. An RRset is being added to the in-memory data source.
-
-% MEM_DUP_RRSET duplicate RRset '%1/%2'
-An RRset is being inserted into in-memory data source for a second time. The
-original version must be removed first. Note that loading master files where an
-RRset is split into multiple locations is not supported yet.
-
-% MEM_DNAME_ENCOUNTERED encountered a DNAME
-Debug information. While searching for the requested domain, a DNAME was
-encountered on the way. This may lead to redirection to a different domain and
-stop the search.
-
-% MEM_NS_ENCOUNTERED encountered a NS
-Debug information. While searching for the requested domain, a NS was
-encountered on the way (a delegation). This may lead to stop of the search.
-
-% MEM_RENAME renaming RRset from '%1' to '%2'
-Debug information. A RRset is being generated from a different RRset (most
-probably a wildcard). So it must be renamed to whatever the user asked for. In
-fact, it's impossible to rename RRsets with our libraries, so a new one is
-created and all resource records are copied over.
-
-% MEM_FIND find '%1/%2'
-Debug information. A search for the requested RRset is being started.
-
-% MEM_DNAME_FOUND DNAME found at '%1'
-Debug information. A DNAME was found instead of the requested information.
-
-% MEM_DELEG_FOUND delegation found at '%1'
-Debug information. A delegation point was found above the requested record.
-
-% MEM_SUPER_STOP stopped at superdomain '%1', domain '%2' is empty
-Debug information. The search stopped at a superdomain of the requested
-domain. The domain is a empty nonterminal, therefore it is treated as NXRRSET
-case (eg. the domain exists, but it doesn't have the requested record type).
-
-% MEM_WILDCARD_CANCEL wildcard match canceled for '%1'
-Debug information. A domain above wildcard was reached, but there's something
-below the requested domain. Therefore the wildcard doesn't apply here. This
-behaviour is specified by RFC 1034, section 4.3.3
-
-% MEM_NOTFOUND requested domain '%1' not found
-Debug information. The requested domain does not exist.
-
-% MEM_DOMAIN_EMPTY requested domain '%1' is empty
-Debug information. The requested domain exists in the tree of domains, but
-it is empty. Therefore it doesn't contain the requested resource type.
-
-% MEM_EXACT_DELEGATION delegation at the exact domain '%1'
-Debug information. There's a NS record at the requested domain. This means
-this zone is not authoritative for the requested domain, but a delegation
-should be followed. The requested domain is an apex of some zone.
-
-% MEM_ANY_SUCCESS ANY query for '%1' successful
-Debug information. The domain was found and an ANY type query is being answered
-by providing everything found inside the domain.
-
-% MEM_SUCCESS query for '%1/%2' successful
-Debug information. The requested record was found.
-
-% MEM_CNAME CNAME at the domain '%1'
-Debug information. The requested domain is an alias to a different domain,
-returning the CNAME instead.
-
-% MEM_NXRRSET no such type '%1' at '%2'
-Debug information. The domain exists, but it doesn't hold any record of the
-requested type.
-
-% MEM_CREATE creating zone '%1' in '%2' class
-Debug information. A representation of a zone for the in-memory data source is
-being created.
-
-% MEM_DESTROY destroying zone '%1' in '%2' class
-Debug information. A zone from in-memory data source is being destroyed.
-
-% MEM_LOAD loading zone '%1' from file '%2'
-Debug information. The content of master file is being loaded into the memory.
-
-% MEM_SWAP swapping contents of two zone representations ('%1' and '%2')
-Debug information. The contents of two in-memory zones are being exchanged.
-This is usual practice to do some manipulation in exception-safe manner -- the
-new data are prepared in a different zone object and when it works, they are
-swapped. The old one contains the new data and the other one can be safely
-destroyed.
-
-% MEM_ADD_ZONE adding zone '%1/%2'
-Debug information. A zone is being added into the in-memory data source.
-
-% MEM_FIND_ZONE looking for zone '%1'
-Debug information. A zone object for this zone is being searched for in the
-in-memory data source.
-
-% STATIC_CREATE creating the static datasource
-Debug information. The static data source (the one holding stuff like
-version.bind) is being created.
-
-% STATIC_BAD_CLASS static data source can handle CH only
-For some reason, someone asked the static data source a query that is not in
-the CH class.
-
-% STATIC_FIND looking for '%1/%2'
-Debug information. This resource record set is being looked up in the static
-data source.
-
-% SQLITE_FINDREC looking for record '%1/%2'
-Debug information. The SQLite data source is looking up records of given name
-and type in the database.
-
-% SQLITE_ENCLOSURE looking for zone containing '%1'
-Debug information. The SQLite data source is trying to identify which zone
-should hold this domain.
-
-% SQLITE_ENCLOSURE_NOTFOUND no zone contains it
-Debug information. The last SQLITE_ENCLOSURE query was unsuccessful; there's
-no such zone in our data.
-
-% SQLITE_PREVIOUS looking for name previous to '%1'
-Debug information. We're trying to look up name preceding the supplied one.
-
-% SQLITE_PREVIOUS_NO_ZONE no zone containing '%1'
-The SQLite data source tried to identify name preceding this one. But this
-one is not contained in any zone in the data source.
-
-% SQLITE_FIND_NSEC3 looking for NSEC3 in zone '%1' for hash '%2'
-Debug information. We're trying to look up a NSEC3 record in the SQLite data
-source.
-
-% SQLITE_FIND_NSEC3_NO_ZONE no such zone '%1'
-The SQLite data source was asked to provide a NSEC3 record for given zone.
-But it doesn't contain that zone.
-
-% SQLITE_FIND looking for RRset '%1/%2'
-Debug information. The SQLite data source is looking up a resource record
-set.
-
-% SQLITE_FIND_BAD_CLASS class mismatch looking for an RRset ('%1' and '%2')
-The SQLite data source was looking up an RRset, but the data source contains
-different class than the query was for.
-
-% SQLITE_FINDEXACT looking for exact RRset '%1/%2'
-Debug information. The SQLite data source is looking up an exact resource
-record.
-
-% SQLITE_FINDEXACT_BAD_CLASS class mismatch looking for an RRset ('%1' and '%2')
-The SQLite data source was looking up an exact RRset, but the data source
-contains different class than the query was for.
-
-% SQLITE_FINDADDRS looking for A/AAAA addresses for '%1'
-Debug information. The data source is looking up the addresses for given
-domain name.
-
-% SQLITE_FINDADDRS_BAD_CLASS class mismatch looking for addresses ('%1' and '%2')
-The SQLite data source was looking up A/AAAA addresses, but the data source
-contains different class than the query was for.
-
-% SQLITE_FINDREF looking for referral at '%1'
-Debug information. The SQLite data source is identifying if this domain is
-a referral and where it goes.
-
-% SQLITE_FINDREF_BAD_CLASS class mismatch looking for referral ('%1' and '%2')
-The SQLite data source was trying to identify if there's a referral. But
-it contains different class than the query was for.
-
-% SQLITE_CREATE SQLite data source created
-Debug information. An instance of SQLite data source is being created.
-
-% SQLITE_DESTROY SQLite data source destroyed
-Debug information. An instance of SQLite data source is being destroyed.
-
-% SQLITE_SETUP setting up SQLite database
-The database for SQLite data source was found empty. It is assumed this is the
-first run and it is being initialized with current schema. It'll still contain
-no data, but it will be ready for use.
-
-% SQLITE_OPEN opening SQLite database '%1'
-Debug information. The SQLite data source is loading an SQLite database in
-the provided file.
-
-% SQLITE_CLOSE closing SQLite database
-Debug information. The SQLite data source is closing the database file.
diff --git a/src/lib/log/Makefile.am b/src/lib/log/Makefile.am
index 7833e0d..63b1dfb 100644
--- a/src/lib/log/Makefile.am
+++ b/src/lib/log/Makefile.am
@@ -8,7 +8,7 @@ CLEANFILES = *.gcno *.gcda
lib_LTLIBRARIES = liblog.la
liblog_la_SOURCES =
liblog_la_SOURCES += dummylog.h dummylog.cc
-liblog_la_SOURCES += impldef.cc impldef.h
+liblog_la_SOURCES += logimpl_messages.cc logimpl_messages.h
liblog_la_SOURCES += log_formatter.h log_formatter.cc
liblog_la_SOURCES += logger.cc logger.h
liblog_la_SOURCES += logger_impl.cc logger_impl.h
@@ -21,7 +21,7 @@ liblog_la_SOURCES += logger_name.cc logger_name.h
liblog_la_SOURCES += logger_specification.h
liblog_la_SOURCES += logger_support.cc logger_support.h
liblog_la_SOURCES += macros.h
-liblog_la_SOURCES += messagedef.cc messagedef.h
+liblog_la_SOURCES += log_messages.cc log_messages.h
liblog_la_SOURCES += message_dictionary.cc message_dictionary.h
liblog_la_SOURCES += message_exception.h
liblog_la_SOURCES += message_initializer.cc message_initializer.h
@@ -30,8 +30,8 @@ liblog_la_SOURCES += message_types.h
liblog_la_SOURCES += output_option.cc output_option.h
EXTRA_DIST = README
-EXTRA_DIST += impldef.mes
-EXTRA_DIST += messagedef.mes
+EXTRA_DIST += logimpl_messages.mes
+EXTRA_DIST += log_messages.mes
# Note: the ordering matters: -Wno-... must follow -Wextra (defined in
# B10_CXXFLAGS)
diff --git a/src/lib/log/README b/src/lib/log/README
index 6b4cf11..d854dce 100644
--- a/src/lib/log/README
+++ b/src/lib/log/README
@@ -1,11 +1,12 @@
This directory holds the first release of the logging system.
+
Basic Ideas
===========
The BIND-10 logging system merges two ideas:
* A hierarchical logging system similar to that used in Java (i.e. log4j)
-* Separation of message definitions and text
+* Separation of message use from message text
Hierarchical Logging System
@@ -28,23 +29,26 @@ above, the INFO/Syslog attributes could be associated with the root logger
while the DEBUG/file attributes are associated with the "cache" logger.
-Separation of Messages Definitions And Text
-===========================================
-The reason for this is to allow the message text to be overridden by versions
-in a local language. To do this, each message is identified by an identifier
-e.g. "OPENIN". Within the program, this is the symbol passed to the logging
-system. The logger system uses the symbol as an index into a dictionary to
-retrieve the message associated with it (e.g. "unable to open %s for input").
-substitutes any message parameters (in this example, the string that is an
-invalid filename) and logs it to the destination.
+Separation of Message Use from Message Text
+===========================================
+By separating the use of the message from the text associated with it -
+in essence, defining message text in an external file - it is possible to
+replace the supplied text of the messages with a local language version.
-In the BIND-10 system, a set of default messages are linked into the
-program. At run-time. each program reads a message file, updating the
-stored definitions; this updated text is logged. However, to aid support,
-the message identifier so in the example above, the message finally logged
-would be something like:
+Each message is identified by an identifier e.g. "LOG_WRITE_ERROR".
+Within the program, this is the symbol passed to the logging system.
+The logger system uses the symbol as an index into a dictionary to
+retrieve the message associated with it (e.g. "unable to open %s for
+input"). It then substitutes any message parameters (in this example,
+the name of the file where the write operation failed) and logs it to
+the destination.
- FAC_OPENIN, unable to open a.txt for input
+In BIND-10, the default text for each message is linked into the
+program. Each program is able to read a locally-defined message file
+when it starts, updating the stored definitions with site-specific text.
+When the message is logged, the updated text is output. However, the
+message identifier is always included in the output so that the origin
+of the message can be identified even if the text has been changed.
Using The System
@@ -52,8 +56,9 @@ Using The System
The steps in using the system are:
1. Create a message file. This defines messages by an identification - a
- mnemonic for the message, typically 6-12 characters long - and a message.
- The file is described in more detail below.
+ mnemonic for the message, the convention being that these are a few
+ words separated by underscores - and text that explains the message in
+ more detail. The file is described in more detail below.
Ideally the file should have a file type of ".mes".
@@ -73,9 +78,7 @@ The steps in using the system are:
described in more detail below.
5. To set the debug level and run-time message file, call initLogger (declared
- in logger_support.h) in the main program unit. This is a temporary solution
- for Year 2, and will be replaced at a later date, the information coming
- from the configuration database.
+ in logger_support.h) in the main program unit.
Message Files
@@ -90,16 +93,16 @@ An example file could be:
-- BEGIN --
# Example message file
-# $ID:$
-$PREFIX TEST_
$NAMESPACE isc::log
-% TEST1 message %1 is much too large
-This message is a test for the general message code
+% LOG_UNRECOGNISED_DIRECTIVE line %1: unrecognised directive '%2'
+A line starting with a dollar symbol was found, but the first word on the line
+(shown in the message) was not a recognised message compiler directive.
-% UNKNOWN unknown message
-Issued when the message is unknown.
+% LOG_WRITE_ERROR error writing to %1: %2
+The specified error was encountered by the message compiler when writing to
+the named output file.
-- END --
@@ -114,10 +117,8 @@ Points to note:
a line by themselves - inline comments will be interpreted as part of the
text of the line.
-* Lines starting $ are directives. At present, two directives are recognised:
-
- * $PREFIX, which has one optional argument: the string used to prefix symbols.
- If absent, there is no prefix to the symbols (prefixes are explained below).
+* Lines starting $ are directives. At present, just one directive is
+ recognised:
* $NAMESPACE, which has one argument: the namespace in which the symbols are
created. In the absence of a $NAMESPACE directive, symbols will be put in
@@ -127,10 +128,19 @@ Points to note:
identification and the message text, the latter including zero or more
replacement tokens, e.g.
- % TEST message %1 is larger than the permitted length of %2
+ % LOG_WRITE_ERROR error writing to %1: %2
* There may be zero or more spaces between the leading "%" and the message
- identification (which, in the example above, is the word "TEST").
+ identification (which, in the example above, is the string
+ "LOG_WRITE_ERROR").
+
+ * The message identification can be any string of letters, digits and
+ underscores, but should not start with a digit. The convention adopted
+ in BIND 10 is for the first component (before the first underscore) to be
+ a string indicating the origin of the message, and the remainder to
+ describe the message. So in the example above, the LOG_ indicates that
+ the error originated from the logging library and the "WRITE_ERROR"
+ indicates that there was a problem in a write operation.
* The replacement tokens are the strings "%1", "%2" etc. When a message
is logged, these are replaced with the arguments passed to the logging
@@ -147,42 +157,36 @@ Message Compiler
The message compiler is a program built in the src/log/compiler directory.
It is invoked by the command:
- message [-h] [-v] <message-file>
+ message [-h] [-v] [-p] <message-file>
+
+("-v" prints the version number and exits; "-h" prints brief help text.) The
+compiler produces source files for C++ and Python.
-("-v" prints the version number and exits; "-h" prints brief help text.)
-The message compiler processes the message file to produce two files:
+C++ Files
+---------
+Without the "-p" option, the message compiler processes the message file
+to produce two files:
1) A C++ header file (called <message-file-name>.h) that holds lines of
the form:
namespace <namespace> {
- extern const isc::log::MessageID PREFIX_IDENTIFIER;
+ extern const isc::log::MessageID LOG_WRITE_ERROR;
:
}
The symbols define the keys in the global message dictionary, with the
namespace enclosing the symbols set by the $NAMESPACE directive.
-The "PREFIX_" part of the symbol name is the string defined in the $PREFIX
-the argument to the directive. So "$PREFIX MSG_" would prefix the identifier
-ABC with "MSG_" to give the symbol MSG_ABC. Similarly "$PREFIX E" would
-prefix it with "E" to give the symbol EABC. If no $PREFIX is given, no
-prefix appears (so the symbol in this example would be ABC).
-
-The prefix is "syntactic sugar". Generally all symbols in a given message file
-will be prefixed with the same set of letters. By extracting these into
-a separate prefix, it becomes easier to disambiguate the different symbols.
-
-There may be multiple $PREFIX directives in a file. A $PREFIX directive applies
-to all message definitions between it an the next $PREFIX directive. A $PREFIX
-directive with no arguments clears the current prefix.
+(This is the reason for the restriction on message identifiers - they
+have to be valid C++ symbol names.)
2) A C++ source file (called <message-file-name>.cc) that holds the definitions
of the global symbols and code to insert the symbols and messages into the map.
Symbols are defined to be equal to strings holding the identifier, e.g.
- extern const isc::log::MessageID MSG_DUPLNS = "MSG_DUPLNS";
+ extern const isc::log::MessageID LOG_WRITE_ERROR = "LOG_WRITE_ERROR";
(The implementation allows symbols to be compared. However, use of strings
should not be assumed - a future implementation may change this.)
@@ -208,16 +212,29 @@ A check is made as each is added; if the identifier already exists, it is
added to "overflow" vector; the vector is printed to the main logging output
when logging is finally enabled (to indicate a programming error).
+Python Files
+------------
+If the "-p" option is given, the compiler produces a Python module defining
+the messages. The format of this is:
+
+import isc.log
+ :
+LOG_WRITE_ERROR = isc.log.create_message("LOG_WRITE_ERROR",
+ "error writing to %1 : %2")
-Using the Logging
-=================
-To use the current version of the logging:
+(The definition is output on one line - it is split across two lines in this
+document for readability.)
+The module can be imported into other Python code, and messages logged
+in a similar way to C++ using the Python logging library.
+
+Using the Logging - C++
+=======================
1. Build message header file and source file as described above.
2. The main program unit should include a call to isc::log::initLogger()
- (defined in logger_support.h) to set the logging severity, debug log level,
- and external message file:
+ (defined in logger_support.h) to set the logging severity, debug log
+ level, and external message file:
a) The logging severity is one of the enum defined in logger.h, i.e.
@@ -228,88 +245,134 @@ To use the current version of the logging:
isc::log::FATAL
isc::log::NONE
- b) The debug log level is only interpreted when the severity is DEBUG and
- is an integer ranging from 0 to 99. 0 should be used for the
- highest-level debug messages and 99 for the lowest-level (and typically
- more verbose) messages.
+ b) The debug log level is only interpreted when the severity is
+ DEBUG and is an integer ranging from 0 to 99. 0 should be used
+ for the highest-level debug messages and 99 for the lowest-level
+ (and typically more verbose) messages.
+
+ c) The external message file. If present, this is the same as a
+ standard message file, although it should not include any
+ directives. (A single directive of a particular type will be
+ ignored; multiple directives will cause the read of the file to
+ fail with an error.)
+
+ The settings remain in effect until the logging configuration is read,
+ and so provide the default logging during program initialization.
+
+3. Declare a logger through which the message will be logged.
+
+ isc::log::Logger logger("name");
+
+ The string passed to the constructor is the name of the logger (it
+ can be any string) and is used when configuring it. Loggers with
+ the same name share the same configuration.
- c) The external message file. If present, this is the same as a standard
- message file, although it should not include any directives. (A single
- directive of a particular type will be ignored; multiple directives will
- cause the read of the file to fail with an error.)
+4. Issue logging calls using supplied macros in "log/macros.h", e.g.
- The settings remain in effect until the logging configuration is read, and
- so provide the default logging during program initialization.
+ LOG_ERROR(logger, LOG_WRITE_ERROR).arg("output.txt");
-3. Issue logging calls using supplied macros in "log/macros.h", e.g.
+ (The macros are more efficient than calls to the methods on the logger
+ class: they avoid the overhead of evaluating the parameters to arg()
+ if the settings are such that the message is not going to be output.)
- LOG_ERROR(logger, DPS_NSTIMEOUT).arg("isc.org");
+Using the Logging - Python
+==========================
+1. Build message module as described above.
- (The macros are more efficient that calls to the methods on the logger class:
- they avoid the overhead of evaluating the parameters to arg() if the
- settings are such that the message is not going to be output.)
+2. The main program unit should include a call to isc.log.init() to
+ set the logging severity, debug log level, and external
+ message file:
- Note: in the example above we might have defined the symbol in the message
- file with something along the lines of:
+ a) The logging severity is one of the strings:
+
+ DEBUG
+ INFO
+ WARN
+ ERROR
+ FATAL
+ NONE
+
+ b) The debug log level is only interpreted when the severity is
+ DEBUG and is an integer ranging from 0 to 99. 0 should be used
+ for the highest-level debug messages and 99 for the lowest-level
+ (and typically more verbose) messages.
+
+ c) The external message file. If present, this is the same as a
+ standard message file, although it should not include any
+ directives. (Any that are there will be ignored.)
+
+ The settings remain in effect until the logging configuration is read,
+ and so provide the default logging during program initialization.
+
+3. Declare a logger through which the message will be logged.
+
+ logger = isc.log.Logger("name")
+
+ The string passed to the constructor is the name of the logger (it
+ can be any string) and is used when configuring it. Loggers with
+ the same name share the same configuration.
+
+4. Issue calls to the logging methods:
+
+ logger.error(LOG_WRITE_ERROR, "output.txt")
- $PREFIX DPS_
- :
- NSTIMEOUT queries to all nameservers for %1 have timed out
Severity Guidelines
===================
-When using logging, the question arises, what severity should a message be
-logged at? The following is a suggestion - as always, the decision must be
-made in the context of which the message is logged.
+When using logging, the question arises, what severity should a message
+be logged at? The following is a suggestion - as always, the decision
+must be made in the context in which the message is logged.
-One thing that should always be borne in mind is whether the logging could
-be used as a vector for a DOS attack. For example, if a warning message is
-logged every time an invalid packet is received, an attacker could simply send
-large numbers of invalid packets. (Of course, warnings could be disabled (or
-just warnings for that that particular logger), but nevertheless the message
-is an attack vector.)
+One thing that should always be borne in mind is whether the logging
+could be used as a vector for a DOS attack. For example, if a warning
+message is logged every time an invalid packet is received, an attacker
+could simply send large numbers of invalid packets. (Of course, warnings
+could be disabled (or just warnings for that particular logger),
+but nevertheless the message is an attack vector.)
FATAL
-----
-The program has encountered an error that is so severe that it cannot continue
-(or there is no point in continuing). When a fatal error has been logged,
-the program will usually exit immediately (or shortly afterwards) after
-dumping some diagnostic information.
+The program has encountered an error that is so severe that it cannot
+continue (or there is no point in continuing). When a fatal error
+has been logged, the program will usually exit immediately (or shortly
+afterwards) after dumping some diagnostic information.
ERROR
-----
-Something has happened such that the program can continue but the results
-for the current (or future) operations cannot be guaranteed to be correct,
-or the results will be correct but the service is impaired. For example,
-the program started but attempts to open one or more network interfaces failed.
+Something has happened such that the program can continue but the
+results for the current (or future) operations cannot be guaranteed to
+be correct, or the results will be correct but the service is impaired.
+For example, the program started but attempts to open one or more network
+interfaces failed.
WARN
----
An unusual event happened. Although the program will continue working
-normally, the event was sufficiently out of the ordinary to warrant drawing
-attention to it. For example, at program start-up a zone was loaded that
-contained no resource records,
+normally, the event was sufficiently out of the ordinary to warrant
+drawing attention to it. For example, at program start-up a zone was
+loaded that contained no resource records.
INFO
----
A normal but significant event has occurred that should be recorded,
-e.g. the program has started or is just about to terminate, a new zone has
-been created, etc.
+e.g. the program has started or is just about to terminate, a new zone
+has been created, etc.
DEBUG
-----
This severity is only enabled for debugging purposes. A debug level is
associated with debug messages, level 0 (the default) being for high-level
-messages and level 99 (the maximum) for the lowest level. How the messages
-are distributed between the levels is up to the developer. So if debugging
-the NSAS (for example), a level 0 message might record the creation of a new
-zone, a level 10 recording a timeout when trying to get a nameserver address,
-but a level 50 would record every query for an address. (And we might add
-level 70 to record every update of the RTT.)
-
-Note that like severities, levels are cumulative; so if level 25 is set as the
-debug level, all debug levels from 0 to 25 will be output. In fact, it is
-probably easier to visualise the debug levels as part of the severity system:
+messages and level 99 (the maximum) for the lowest level. How the
+messages are distributed between the levels is up to the developer.
+So if debugging the NSAS (for example), a level 0 message might record
+the creation of a new zone, a level 10 recording a timeout when trying
+to get a nameserver address, but a level 50 would record every query for
+an address. (And we might add level 70 to record every update of the RTT.)
+
+Note that like severities, levels are cumulative; so if level 25 is
+set as the debug level, all debug levels from 0 to 25 will be output.
+In fact, it is probably easier to visualise the debug levels as part of
+the severity system:
FATAL High
ERROR
@@ -325,38 +388,34 @@ levels above it - will be logged.
Logging Sources v Logging Severities
------------------------------------
-When logging events, make a distinction between events related to the server
-and events related to DNS messages received. Caution needs to be exercised
-with the latter as, if the logging is enabled in the normal course of events,
-such logging could be a denial of service vector. For example, suppose that
-the main authoritative service logger were to log both zone loading and
-unloading as INFO and a warning message if it received an invalid packet. An
-attacker could make the INFO messages unusable by flooding the server with
-malformed packets.
+When logging events, make a distinction between events related to the
+server and events related to DNS messages received. Caution needs to
+be exercised with the latter as, if the logging is enabled in the normal
+course of events, such logging could be a denial of service vector. For
+example, suppose that the main authoritative service logger were to
+log both zone loading and unloading as INFO and a warning message if
+it received an invalid packet. An attacker could make the INFO messages
+unusable by flooding the server with malformed packets.
There are two approaches to get round this:
a) Make the logging of packet-dependent events a DEBUG-severity message.
-DEBUG is not enabled by default, so these events will not be recorded unless
-DEBUG is specifically chosen.
+DEBUG is not enabled by default, so these events will not be recorded
+unless DEBUG is specifically chosen.
b) Record system-related and packet-related messages via different loggers
-(e.g. in the example given, server events could be logged using the logger
-"auth" and packet-related events at that level logged using the logger
-"pkt-auth".) As the loggers are independent and the severity levels
-independent, fine-tuning of what and what is not recorded can be achieved.
+(e.g. in the example given, server events could be logged using the
+logger "auth" and packet-related events at that level logged using the
+logger "pkt-auth".) As the loggers are independent and the severity
+levels independent, fine-tuning of what is and what is not recorded can
+be achieved.
Notes
=====
The message compiler is written in C++ (instead of Python) because it
contains a component that reads the message file. This component is used
-in both the message compiler and the server; in the server it is used when
-the server starts up (or when triggered by a command) to read in a message
-file to overwrite the internal dictionary. Writing it in C++ means there
-is only one piece of code that does this functionality.
-
-
-Outstanding Issues
-==================
-* Ability to configure system according to configuration database.
+in both the message compiler and the server; in the server it is used
+when the server starts up (or when triggered by a command) to read in
+a message file to overwrite the internal dictionary. Writing it in C++
+means there is only one piece of code that does this functionality.
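
To tie together the C++ usage steps described in the README above, a minimal
program following them might look like the sketch below. This is an
illustration only, not part of the commit: the initLogger() argument list is
assumed from the README's description (root name and severity, with the debug
level and external message file left at their defaults), and LOG_WRITE_ERROR
is the message defined later in this commit.

    // Sketch of the C++ usage flow described in the README (assumptions noted above).
    #include <log/logger.h>
    #include <log/logger_support.h>
    #include <log/macros.h>
    #include <log/log_messages.h>      // generated from log_messages.mes

    int main() {
        // Default settings; they stay in effect until the logging
        // configuration is read.
        isc::log::initLogger("example", isc::log::INFO);

        // Loggers with the same name share the same configuration.
        isc::log::Logger logger("example");

        // The macro avoids evaluating the arg() parameters when the message
        // is not going to be output.  LOG_WRITE_ERROR has two replacement
        // tokens: the file name and the reason.
        LOG_ERROR(logger, isc::log::LOG_WRITE_ERROR)
            .arg("output.txt").arg("disk full");
        return (0);
    }
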
diff --git a/src/lib/log/compiler/message.cc b/src/lib/log/compiler/message.cc
index 53a24ee..68335dc 100644
--- a/src/lib/log/compiler/message.cc
+++ b/src/lib/log/compiler/message.cc
@@ -28,10 +28,10 @@
#include <util/filename.h>
#include <util/strutil.h>
+#include <log/log_messages.h>
#include <log/message_dictionary.h>
#include <log/message_exception.h>
#include <log/message_reader.h>
-#include <log/messagedef.h>
#include <log/logger.h>
@@ -306,7 +306,7 @@ writeHeaderFile(const string& file, const vector<string>& ns_components,
ofstream hfile(header_file.fullName().c_str());
if (hfile.fail()) {
- throw MessageException(MSG_OPENOUT, header_file.fullName(),
+ throw MessageException(LOG_OPEN_OUTPUT_FAIL, header_file.fullName(),
strerror(errno));
}
@@ -340,7 +340,7 @@ writeHeaderFile(const string& file, const vector<string>& ns_components,
// Report errors (if any) and exit
if (hfile.fail()) {
- throw MessageException(MSG_WRITERR, header_file.fullName(),
+ throw MessageException(LOG_WRITE_ERROR, header_file.fullName(),
strerror(errno));
}
@@ -394,7 +394,7 @@ writeProgramFile(const string& file, const vector<string>& ns_components,
ofstream ccfile(program_file.fullName().c_str());
if (ccfile.fail()) {
- throw MessageException(MSG_OPENOUT, program_file.fullName(),
+ throw MessageException(LOG_OPEN_OUTPUT_FAIL, program_file.fullName(),
strerror(errno));
}
@@ -452,7 +452,7 @@ writeProgramFile(const string& file, const vector<string>& ns_components,
// Report errors (if any) and exit
if (ccfile.fail()) {
- throw MessageException(MSG_WRITERR, program_file.fullName(),
+ throw MessageException(LOG_WRITE_ERROR, program_file.fullName(),
strerror(errno));
}
diff --git a/src/lib/log/impldef.cc b/src/lib/log/impldef.cc
deleted file mode 100644
index 087ebea..0000000
--- a/src/lib/log/impldef.cc
+++ /dev/null
@@ -1,29 +0,0 @@
-// File created from impldef.mes on Wed Jun 1 10:32:57 2011
-
-#include <cstddef>
-#include <log/message_types.h>
-#include <log/message_initializer.h>
-
-namespace isc {
-namespace log {
-
-extern const isc::log::MessageID LOGIMPL_ABOVEDBGMAX = "LOGIMPL_ABOVEDBGMAX";
-extern const isc::log::MessageID LOGIMPL_BADDEBUG = "LOGIMPL_BADDEBUG";
-extern const isc::log::MessageID LOGIMPL_BELOWDBGMIN = "LOGIMPL_BELOWDBGMIN";
-
-} // namespace log
-} // namespace isc
-
-namespace {
-
-const char* values[] = {
- "LOGIMPL_ABOVEDBGMAX", "debug level of %1 is too high and will be set to the maximum of %2",
- "LOGIMPL_BADDEBUG", "debug string is '%1': must be of the form DEBUGn",
- "LOGIMPL_BELOWDBGMIN", "debug level of %1 is too low and will be set to the minimum of %2",
- NULL
-};
-
-const isc::log::MessageInitializer initializer(values);
-
-} // Anonymous namespace
-
diff --git a/src/lib/log/impldef.h b/src/lib/log/impldef.h
deleted file mode 100644
index 7c70996..0000000
--- a/src/lib/log/impldef.h
+++ /dev/null
@@ -1,18 +0,0 @@
-// File created from impldef.mes on Wed Jun 1 10:32:57 2011
-
-#ifndef __IMPLDEF_H
-#define __IMPLDEF_H
-
-#include <log/message_types.h>
-
-namespace isc {
-namespace log {
-
-extern const isc::log::MessageID LOGIMPL_ABOVEDBGMAX;
-extern const isc::log::MessageID LOGIMPL_BADDEBUG;
-extern const isc::log::MessageID LOGIMPL_BELOWDBGMIN;
-
-} // namespace log
-} // namespace isc
-
-#endif // __IMPLDEF_H
diff --git a/src/lib/log/impldef.mes b/src/lib/log/impldef.mes
deleted file mode 100644
index 93e9fab..0000000
--- a/src/lib/log/impldef.mes
+++ /dev/null
@@ -1,38 +0,0 @@
-# Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
-#
-# Permission to use, copy, modify, and/or distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
-# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
-# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
-# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
-# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
-# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
-# PERFORMANCE OF THIS SOFTWARE.
-
-# \brief Logger Implementation Messages
-#
-# This holds messages generated by the underlying logger implementation. They
-# are likely to be specific to that implementation, and may well change if the
-# underlying implementation is changed. For that reason, they have been put
-# in a separate file.
-
-$PREFIX LOGIMPL_
-$NAMESPACE isc::log
-
-% ABOVEDBGMAX debug level of %1 is too high and will be set to the maximum of %2
-A message from the underlying logger implementation code, the debug level
-(as set by the string DEBGUGn) is above the maximum allowed value and has
-been reduced to that value.
-
-% BADDEBUG debug string is '%1': must be of the form DEBUGn
-The string indicating the extended logging level (used by the underlying
-logger implementation code) is not of the stated form. In particular,
-it starts DEBUG but does not end with an integer.
-
-% BELOWDBGMIN debug level of %1 is too low and will be set to the minimum of %2
-A message from the underlying logger implementation code, the debug level
-(as set by the string DEBGUGn) is below the minimum allowed value and has
-been increased to that value.
diff --git a/src/lib/log/log_messages.cc b/src/lib/log/log_messages.cc
new file mode 100644
index 0000000..a515959
--- /dev/null
+++ b/src/lib/log/log_messages.cc
@@ -0,0 +1,63 @@
+// File created from log_messages.mes on Wed Jun 22 11:54:57 2011
+
+#include <cstddef>
+#include <log/message_types.h>
+#include <log/message_initializer.h>
+
+namespace isc {
+namespace log {
+
+extern const isc::log::MessageID LOG_BAD_DESTINATION = "LOG_BAD_DESTINATION";
+extern const isc::log::MessageID LOG_BAD_SEVERITY = "LOG_BAD_SEVERITY";
+extern const isc::log::MessageID LOG_BAD_STREAM = "LOG_BAD_STREAM";
+extern const isc::log::MessageID LOG_DUPLICATE_MESSAGE_ID = "LOG_DUPLICATE_MESSAGE_ID";
+extern const isc::log::MessageID LOG_DUPLICATE_NAMESPACE = "LOG_DUPLICATE_NAMESPACE";
+extern const isc::log::MessageID LOG_INPUT_OPEN_FAIL = "LOG_INPUT_OPEN_FAIL";
+extern const isc::log::MessageID LOG_INVALID_MESSAGE_ID = "LOG_INVALID_MESSAGE_ID";
+extern const isc::log::MessageID LOG_NAMESPACE_EXTRA_ARGS = "LOG_NAMESPACE_EXTRA_ARGS";
+extern const isc::log::MessageID LOG_NAMESPACE_INVALID_ARG = "LOG_NAMESPACE_INVALID_ARG";
+extern const isc::log::MessageID LOG_NAMESPACE_NO_ARGS = "LOG_NAMESPACE_NO_ARGS";
+extern const isc::log::MessageID LOG_NO_MESSAGE_ID = "LOG_NO_MESSAGE_ID";
+extern const isc::log::MessageID LOG_NO_MESSAGE_TEXT = "LOG_NO_MESSAGE_TEXT";
+extern const isc::log::MessageID LOG_NO_SUCH_MESSAGE = "LOG_NO_SUCH_MESSAGE";
+extern const isc::log::MessageID LOG_OPEN_OUTPUT_FAIL = "LOG_OPEN_OUTPUT_FAIL";
+extern const isc::log::MessageID LOG_PREFIX_EXTRA_ARGS = "LOG_PREFIX_EXTRA_ARGS";
+extern const isc::log::MessageID LOG_PREFIX_INVALID_ARG = "LOG_PREFIX_INVALID_ARG";
+extern const isc::log::MessageID LOG_READING_LOCAL_FILE = "LOG_READING_LOCAL_FILE";
+extern const isc::log::MessageID LOG_READ_ERROR = "LOG_READ_ERROR";
+extern const isc::log::MessageID LOG_UNRECOGNISED_DIRECTIVE = "LOG_UNRECOGNISED_DIRECTIVE";
+extern const isc::log::MessageID LOG_WRITE_ERROR = "LOG_WRITE_ERROR";
+
+} // namespace log
+} // namespace isc
+
+namespace {
+
+const char* values[] = {
+ "LOG_BAD_DESTINATION", "unrecognized log destination: %1",
+ "LOG_BAD_SEVERITY", "unrecognized log severity: %1",
+ "LOG_BAD_STREAM", "bad log console output stream: %1",
+ "LOG_DUPLICATE_MESSAGE_ID", "duplicate message ID (%1) in compiled code",
+ "LOG_DUPLICATE_NAMESPACE", "line %1: duplicate $NAMESPACE directive found",
+ "LOG_INPUT_OPEN_FAIL", "unable to open message file %1 for input: %2",
+ "LOG_INVALID_MESSAGE_ID", "line %1: invalid message identification '%2'",
+ "LOG_NAMESPACE_EXTRA_ARGS", "line %1: $NAMESPACE directive has too many arguments",
+ "LOG_NAMESPACE_INVALID_ARG", "line %1: $NAMESPACE directive has an invalid argument ('%2')",
+ "LOG_NAMESPACE_NO_ARGS", "line %1: no arguments were given to the $NAMESPACE directive",
+ "LOG_NO_MESSAGE_ID", "line %1: message definition line found without a message ID",
+ "LOG_NO_MESSAGE_TEXT", "line %1: line found containing a message ID ('%2') and no text",
+ "LOG_NO_SUCH_MESSAGE", "could not replace message text for '%1': no such message",
+ "LOG_OPEN_OUTPUT_FAIL", "unable to open %1 for output: %2",
+ "LOG_PREFIX_EXTRA_ARGS", "line %1: $PREFIX directive has too many arguments",
+ "LOG_PREFIX_INVALID_ARG", "line %1: $PREFIX directive has an invalid argument ('%2')",
+ "LOG_READING_LOCAL_FILE", "reading local message file %1",
+ "LOG_READ_ERROR", "error reading from message file %1: %2",
+ "LOG_UNRECOGNISED_DIRECTIVE", "line %1: unrecognised directive '%2'",
+ "LOG_WRITE_ERROR", "error writing to %1: %2",
+ NULL
+};
+
+const isc::log::MessageInitializer initializer(values);
+
+} // Anonymous namespace
+
diff --git a/src/lib/log/log_messages.h b/src/lib/log/log_messages.h
new file mode 100644
index 0000000..476f686
--- /dev/null
+++ b/src/lib/log/log_messages.h
@@ -0,0 +1,35 @@
+// File created from log_messages.mes on Wed Jun 22 11:54:57 2011
+
+#ifndef __LOG_MESSAGES_H
+#define __LOG_MESSAGES_H
+
+#include <log/message_types.h>
+
+namespace isc {
+namespace log {
+
+extern const isc::log::MessageID LOG_BAD_DESTINATION;
+extern const isc::log::MessageID LOG_BAD_SEVERITY;
+extern const isc::log::MessageID LOG_BAD_STREAM;
+extern const isc::log::MessageID LOG_DUPLICATE_MESSAGE_ID;
+extern const isc::log::MessageID LOG_DUPLICATE_NAMESPACE;
+extern const isc::log::MessageID LOG_INPUT_OPEN_FAIL;
+extern const isc::log::MessageID LOG_INVALID_MESSAGE_ID;
+extern const isc::log::MessageID LOG_NAMESPACE_EXTRA_ARGS;
+extern const isc::log::MessageID LOG_NAMESPACE_INVALID_ARG;
+extern const isc::log::MessageID LOG_NAMESPACE_NO_ARGS;
+extern const isc::log::MessageID LOG_NO_MESSAGE_ID;
+extern const isc::log::MessageID LOG_NO_MESSAGE_TEXT;
+extern const isc::log::MessageID LOG_NO_SUCH_MESSAGE;
+extern const isc::log::MessageID LOG_OPEN_OUTPUT_FAIL;
+extern const isc::log::MessageID LOG_PREFIX_EXTRA_ARGS;
+extern const isc::log::MessageID LOG_PREFIX_INVALID_ARG;
+extern const isc::log::MessageID LOG_READING_LOCAL_FILE;
+extern const isc::log::MessageID LOG_READ_ERROR;
+extern const isc::log::MessageID LOG_UNRECOGNISED_DIRECTIVE;
+extern const isc::log::MessageID LOG_WRITE_ERROR;
+
+} // namespace log
+} // namespace isc
+
+#endif // __LOG_MESSAGES_H
diff --git a/src/lib/log/log_messages.mes b/src/lib/log/log_messages.mes
new file mode 100644
index 0000000..697ac92
--- /dev/null
+++ b/src/lib/log/log_messages.mes
@@ -0,0 +1,146 @@
+# Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+# \brief Message Utility Message File
+#
+# This is the source of the set of messages generated by the message and
+# logging components. The associated .h and .cc files are created by hand from
+# this file though and are not built during the build process; this is to avoid
+# the chicken-and-egg situation where we need the files to build the message
+# compiler, yet we need the compiler to build the files.
+
+$NAMESPACE isc::log
+
+% LOG_BAD_DESTINATION unrecognized log destination: %1
+A logger destination value was given that was not recognized. The
+destination should be one of "console", "file", or "syslog".
+
+% LOG_BAD_SEVERITY unrecognized log severity: %1
+A logger severity value was given that was not recognized. The severity
+should be one of "DEBUG", "INFO", "WARN", "ERROR", or "FATAL".
+
+% LOG_BAD_STREAM bad log console output stream: %1
+A log console output stream was given that was not recognized. The output
+stream should be either "stdout" or "stderr".
+
+% LOG_DUPLICATE_MESSAGE_ID duplicate message ID (%1) in compiled code
+During start-up, BIND10 detected that the given message identification had
+been defined multiple times in the BIND10 code.
+
+This has no ill-effects other than the possibility that an erroneous
+message may be logged. However, as it is indicative of a programming
+error, please log a bug report.
+
+% LOG_DUPLICATE_NAMESPACE line %1: duplicate $NAMESPACE directive found
+When reading a message file, more than one $NAMESPACE directive was found.
+Such a condition is regarded as an error and the read will be abandoned.
+
+% LOG_INPUT_OPEN_FAIL unable to open message file %1 for input: %2
+The program was not able to open the specified input message file for
+the reason given.
+
+% LOG_INVALID_MESSAGE_ID line %1: invalid message identification '%2'
+An invalid message identification (ID) has been found during the read of
+a message file. Message IDs should comprise only alphanumeric characters
+and the underscore, and should not start with a digit.
+
+% LOG_NAMESPACE_EXTRA_ARGS line %1: $NAMESPACE directive has too many arguments
+The $NAMESPACE directive in a message file takes a single argument, a
+namespace in which all the generated symbol names are placed. This error
+is generated when the compiler finds a $NAMESPACE directive with more
+than one argument.
+
+% LOG_NAMESPACE_INVALID_ARG line %1: $NAMESPACE directive has an invalid argument ('%2')
+The $NAMESPACE argument in a message file should be a valid C++ namespace.
+This message is output if the simple check on the syntax of the string
+carried out by the reader fails.
+
+% LOG_NAMESPACE_NO_ARGS line %1: no arguments were given to the $NAMESPACE directive
+The $NAMESPACE directive in a message file takes a single argument,
+a C++ namespace in which all the generated symbol names are placed.
+This error is generated when the compiler finds a $NAMESPACE directive
+with no arguments.
+
+% LOG_NO_MESSAGE_ID line %1: message definition line found without a message ID
+Within a message file, messages are defined by lines starting with a "%".
+The rest of the line should comprise the message ID and text describing
+the message. This error indicates the message compiler found a line in
+the message file comprising just the "%" and nothing else.
+
+% LOG_NO_MESSAGE_TEXT line %1: line found containing a message ID ('%2') and no text
+Within a message file, messages are defined by lines starting with a "%".
+The rest of the line should comprise the message ID and text describing
+the message. This error indicates the message compiler found a line
+in the message file comprising just the "%" and message identification,
+but no text.
+
+% LOG_NO_SUCH_MESSAGE could not replace message text for '%1': no such message
+During start-up a local message file was read. A line with the listed
+message identification was found in the file, but the identification is
+not one contained in the compiled-in message dictionary. This message
+may appear a number of times in the file, once for every such unknown
+message identification.
+
+There may be several reasons why this message may appear:
+
+- The message ID has been mis-spelled in the local message file.
+
+- The program outputting the message may not use that particular message
+(e.g. it originates in a module not used by the program.)
+
+- The local file was written for an earlier version of the BIND10 software
+and the later version no longer generates that message.
+
+Whatever the reason, there is no impact on the operation of BIND10.
+
+% LOG_OPEN_OUTPUT_FAIL unable to open %1 for output: %2
+Originating within the logging code, the program was not able to open
+the specified output file for the reason given.
+
+% LOG_PREFIX_EXTRA_ARGS line %1: $PREFIX directive has too many arguments
+Within a message file, the $PREFIX directive takes a single argument,
+a prefix to be added to the symbol names when a C++ file is created.
+This error is generated when the compiler finds a $PREFIX directive with
+more than one argument.
+
+Note: the $PREFIX directive is deprecated and will be removed in a future
+version of BIND10.
+
+% LOG_PREFIX_INVALID_ARG line %1: $PREFIX directive has an invalid argument ('%2')
+Within a message file, the $PREFIX directive takes a single argument,
+a prefix to be added to the symbol names when a C++ file is created.
+As such, it must adhere to restrictions on C++ symbol names (e.g. may
+only contain alphanumeric characters or underscores, and may not start
+with a digit). A $PREFIX directive was found with an argument (given
+in the message) that violates those restrictions.
+
+Note: the $PREFIX directive is deprecated and will be removed in a future
+version of BIND10.
+
+% LOG_READING_LOCAL_FILE reading local message file %1
+This is an informational message output by BIND10 when it starts to read
+a local message file. (A local message file may replace the text of
+one or more messages; the ID of the message will not be changed though.)
+
+% LOG_READ_ERROR error reading from message file %1: %2
+The specified error was encountered reading from the named message file.
+
+% LOG_UNRECOGNISED_DIRECTIVE line %1: unrecognised directive '%2'
+Within a message file, a line starting with a dollar symbol was found
+(indicating the presence of a directive) but the first word on the line
+(shown in the message) was not recognised.
+
+% LOG_WRITE_ERROR error writing to %1: %2
+The specified error was encountered by the message compiler when writing
+to the named output file.
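
As a concrete companion to LOG_READING_LOCAL_FILE and LOG_NO_SUCH_MESSAGE
above, a hypothetical local message file replacing the text of one of these
messages could be as small as the following (the file name and wording are
invented for illustration; a local file carries no directives, only % lines):

    # local.mes - hypothetical site-specific message file (illustration only).
    # Only the text changes; the message ID is kept, and an ID not present in
    # the compiled-in dictionary would trigger LOG_NO_SUCH_MESSAGE instead.
    % LOG_WRITE_ERROR write to %1 failed: %2 (check free space and permissions)
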
diff --git a/src/lib/log/logger_level.cc b/src/lib/log/logger_level.cc
index 5f74eb3..abac5be 100644
--- a/src/lib/log/logger_level.cc
+++ b/src/lib/log/logger_level.cc
@@ -14,7 +14,7 @@
#include <log/logger_level.h>
#include <log/macros.h>
-#include <log/messagedef.h>
+#include <log/log_messages.h>
#include <boost/algorithm/string.hpp>
@@ -38,7 +38,7 @@ getSeverity(const std::string& sev_str) {
return isc::log::NONE;
} else {
Logger logger("log");
- LOG_ERROR(logger, MSG_BADSEVERITY).arg(sev_str);
+ LOG_ERROR(logger, LOG_BAD_SEVERITY).arg(sev_str);
return isc::log::INFO;
}
}
diff --git a/src/lib/log/logger_level_impl.cc b/src/lib/log/logger_level_impl.cc
index d6d8ed7..397f6d4 100644
--- a/src/lib/log/logger_level_impl.cc
+++ b/src/lib/log/logger_level_impl.cc
@@ -19,9 +19,9 @@
#include <log4cplus/logger.h>
-#include <log/impldef.h>
#include <log/logger_level.h>
#include <log/logger_level_impl.h>
+#include <log/logimpl_messages.h>
#include <log/macros.h>
using namespace log4cplus;
@@ -157,12 +157,12 @@ LoggerLevelImpl::logLevelFromString(const log4cplus::tstring& level) {
try {
int dbglevel = boost::lexical_cast<int>(name.substr(5));
if (dbglevel < MIN_DEBUG_LEVEL) {
- LOG_WARN(logger, LOGIMPL_BELOWDBGMIN).arg(dbglevel)
+ LOG_WARN(logger, LOGIMPL_BELOW_MIN_DEBUG).arg(dbglevel)
.arg(MIN_DEBUG_LEVEL);
dbglevel = MIN_DEBUG_LEVEL;
} else if (dbglevel > MAX_DEBUG_LEVEL) {
- LOG_WARN(logger, LOGIMPL_ABOVEDBGMAX).arg(dbglevel)
+ LOG_WARN(logger, LOGIMPL_ABOVE_MAX_DEBUG).arg(dbglevel)
.arg(MAX_DEBUG_LEVEL);
dbglevel = MAX_DEBUG_LEVEL;
@@ -170,7 +170,7 @@ LoggerLevelImpl::logLevelFromString(const log4cplus::tstring& level) {
return convertFromBindLevel(Level(DEBUG, dbglevel));
}
catch (boost::bad_lexical_cast&) {
- LOG_ERROR(logger, LOGIMPL_BADDEBUG).arg(name);
+ LOG_ERROR(logger, LOGIMPL_BAD_DEBUG_STRING).arg(name);
return (NOT_SET_LOG_LEVEL);
}
}
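
The hunk above only renames the LOGIMPL_* identifiers, but the behaviour those
messages report can be summarised with a standalone sketch (illustration only,
not the code in this commit; the 0-99 range and the "DEBUGn" string form are
taken from the README):

    // Clamping behaviour reported by LOGIMPL_BELOW_MIN_DEBUG,
    // LOGIMPL_ABOVE_MAX_DEBUG and LOGIMPL_BAD_DEBUG_STRING, as a sketch.
    // Input is assumed to be a string of the form "DEBUGn".
    #include <string>
    #include <boost/lexical_cast.hpp>

    int debugLevelFromString(const std::string& level) {
        const int MIN_DEBUG_LEVEL = 0;     // assumed, per the README's 0-99 range
        const int MAX_DEBUG_LEVEL = 99;
        try {
            int dbglevel = boost::lexical_cast<int>(level.substr(5));
            if (dbglevel < MIN_DEBUG_LEVEL) {
                return (MIN_DEBUG_LEVEL);  // LOGIMPL_BELOW_MIN_DEBUG case
            } else if (dbglevel > MAX_DEBUG_LEVEL) {
                return (MAX_DEBUG_LEVEL);  // LOGIMPL_ABOVE_MAX_DEBUG case
            }
            return (dbglevel);
        } catch (const std::exception&) {
            return (-1);                   // LOGIMPL_BAD_DEBUG_STRING case
        }
    }

For example, "DEBUG150" would be clamped to 99 (the case the real code reports
with LOGIMPL_ABOVE_MAX_DEBUG), while "DEBUGx" would fall into the bad-string
case.
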
diff --git a/src/lib/log/logger_manager.cc b/src/lib/log/logger_manager.cc
index 4d56e4b..70e0d6f 100644
--- a/src/lib/log/logger_manager.cc
+++ b/src/lib/log/logger_manager.cc
@@ -15,20 +15,19 @@
#include <algorithm>
#include <vector>
-#include <log/logger_level.h>
-#include <log/logger_manager_impl.h>
+#include <log/logger.h>
#include <log/logger_manager.h>
+#include <log/logger_manager_impl.h>
#include <log/logger_name.h>
#include <log/logger_support.h>
-#include <log/messagedef.h>
+#include <log/log_messages.h>
+#include <log/macros.h>
#include <log/message_dictionary.h>
#include <log/message_exception.h>
#include <log/message_initializer.h>
+#include <log/message_initializer.h>
#include <log/message_reader.h>
#include <log/message_types.h>
-#include <log/macros.h>
-#include <log/messagedef.h>
-#include <log/message_initializer.h>
using namespace std;
@@ -125,7 +124,7 @@ LoggerManager::init(const std::string& root, isc::log::Severity severity,
sort(duplicates.begin(), duplicates.end());
for (vector<string>::iterator i = duplicates.begin();
i != duplicates.end(); ++i) {
- LOG_WARN(logger, MSG_DUPMSGID).arg(*i);
+ LOG_WARN(logger, LOG_DUPLICATE_MESSAGE_ID).arg(*i);
}
}
@@ -147,7 +146,7 @@ LoggerManager::readLocalMessageFile(const char* file) {
MessageReader reader(&dictionary);
try {
- logger.info(MSG_RDLOCMES).arg(file);
+ logger.info(LOG_READING_LOCAL_FILE).arg(file);
reader.readFile(file, MessageReader::REPLACE);
// File successfully read. As each message in the file is supposed to
@@ -158,7 +157,7 @@ LoggerManager::readLocalMessageFile(const char* file) {
for (MessageReader::MessageIDCollection::const_iterator
i = unknown.begin(); i != unknown.end(); ++i) {
string message_id = boost::lexical_cast<string>(*i);
- logger.warn(MSG_IDNOTFND).arg(message_id);
+ logger.warn(LOG_NO_SUCH_MESSAGE).arg(message_id);
}
}
catch (MessageException& e) {
diff --git a/src/lib/log/logger_manager_impl.cc b/src/lib/log/logger_manager_impl.cc
index 92806d0..d69cec8 100644
--- a/src/lib/log/logger_manager_impl.cc
+++ b/src/lib/log/logger_manager_impl.cc
@@ -21,13 +21,13 @@
#include <log4cplus/fileappender.h>
#include <log4cplus/syslogappender.h>
-#include "log/logger.h"
-#include "log/logger_level_impl.h"
-#include "log/logger_manager.h"
-#include "log/logger_manager_impl.h"
-#include "log/logger_name.h"
-#include "log/logger_specification.h"
-#include "log/messagedef.h"
+#include <log/logger.h>
+#include <log/logger_level_impl.h>
+#include <log/logger_manager.h>
+#include <log/logger_manager_impl.h>
+#include <log/log_messages.h>
+#include <log/logger_name.h>
+#include <log/logger_specification.h>
using namespace std;
diff --git a/src/lib/log/logimpl_messages.cc b/src/lib/log/logimpl_messages.cc
new file mode 100644
index 0000000..ca8552e
--- /dev/null
+++ b/src/lib/log/logimpl_messages.cc
@@ -0,0 +1,29 @@
+// File created from logimpl_messages.mes on Wed Jun 22 10:57:02 2011
+
+#include <cstddef>
+#include <log/message_types.h>
+#include <log/message_initializer.h>
+
+namespace isc {
+namespace log {
+
+extern const isc::log::MessageID LOGIMPL_ABOVE_MAX_DEBUG = "LOGIMPL_ABOVE_MAX_DEBUG";
+extern const isc::log::MessageID LOGIMPL_BAD_DEBUG_STRING = "LOGIMPL_BAD_DEBUG_STRING";
+extern const isc::log::MessageID LOGIMPL_BELOW_MIN_DEBUG = "LOGIMPL_BELOW_MIN_DEBUG";
+
+} // namespace log
+} // namespace isc
+
+namespace {
+
+const char* values[] = {
+ "LOGIMPL_ABOVE_MAX_DEBUG", "debug level of %1 is too high and will be set to the maximum of %2",
+ "LOGIMPL_BAD_DEBUG_STRING", "debug string '%1' has invalid format",
+ "LOGIMPL_BELOW_MIN_DEBUG", "debug level of %1 is too low and will be set to the minimum of %2",
+ NULL
+};
+
+const isc::log::MessageInitializer initializer(values);
+
+} // Anonymous namespace
+
diff --git a/src/lib/log/logimpl_messages.h b/src/lib/log/logimpl_messages.h
new file mode 100644
index 0000000..1b94838
--- /dev/null
+++ b/src/lib/log/logimpl_messages.h
@@ -0,0 +1,18 @@
+// File created from logimpl_messages.mes on Wed Jun 22 10:57:02 2011
+
+#ifndef __LOGIMPL_MESSAGES_H
+#define __LOGIMPL_MESSAGES_H
+
+#include <log/message_types.h>
+
+namespace isc {
+namespace log {
+
+extern const isc::log::MessageID LOGIMPL_ABOVE_MAX_DEBUG;
+extern const isc::log::MessageID LOGIMPL_BAD_DEBUG_STRING;
+extern const isc::log::MessageID LOGIMPL_BELOW_MIN_DEBUG;
+
+} // namespace log
+} // namespace isc
+
+#endif // __LOGIMPL_MESSAGES_H
diff --git a/src/lib/log/logimpl_messages.mes b/src/lib/log/logimpl_messages.mes
new file mode 100644
index 0000000..c40f80c
--- /dev/null
+++ b/src/lib/log/logimpl_messages.mes
@@ -0,0 +1,43 @@
+# Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+# \brief Logger Implementation Messages
+#
+# This holds messages generated by the underlying logger implementation. They
+# are likely to be specific to that implementation, and may well change if the
+# underlying implementation is changed. For that reason, they have been put
+# in a separate file.
+
+$NAMESPACE isc::log
+
+% LOGIMPL_ABOVE_MAX_DEBUG debug level of %1 is too high and will be set to the maximum of %2
+A message from the interface to the underlying logger implementation reporting
+that the debug level (as set by an internally-created string DEBUGn, where n
+is an integer, e.g. DEBUG22) is above the maximum allowed value and has
+been reduced to that value. The appearance of this message may indicate
+a programming error - please submit a bug report.
+
+% LOGIMPL_BAD_DEBUG_STRING debug string '%1' has invalid format
+A message from the interface to the underlying logger implementation
+reporting that an internally-created string used to set the debug level
+is not of the correct format (it should be of the form DEBUGn, where n
+is an integer, e.g. DEBUG22). The appearance of this message indicates
+a programming error - please submit a bug report.
+
+% LOGIMPL_BELOW_MIN_DEBUG debug level of %1 is too low and will be set to the minimum of %2
+A message from the interface to the underlying logger implementation reporting
+that the debug level (as set by an internally-created string DEBUGn, where n
+is an integer, e.g. DEBUG22) is below the minimum allowed value and has
+been increased to that value. The appearance of this message may indicate
+a programming error - please submit a bug report.
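
For orientation: the .mes source above is the input that the message compiler turns into the logimpl_messages.h/.cc pair added earlier in this patch, and the generated symbols are then used as ordinary MessageID constants with the logging macros. A minimal, hypothetical call site is sketched below; the logger name "logimpl" and the WARN severity are illustrative assumptions, not taken from this commit.

    #include <string>

    #include <log/logger.h>
    #include <log/macros.h>
    #include <log/logimpl_messages.h>

    using namespace isc::log;

    void
    reportBadDebugString(const std::string& level_str) {
        // "logimpl" is an arbitrary logger name chosen for this sketch.
        Logger logger("logimpl");

        // %1 in the message text is replaced by the value passed to arg().
        LOG_WARN(logger, LOGIMPL_BAD_DEBUG_STRING).arg(level_str);
    }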
diff --git a/src/lib/log/message_reader.cc b/src/lib/log/message_reader.cc
index 1a0b242..2710ab8 100644
--- a/src/lib/log/message_reader.cc
+++ b/src/lib/log/message_reader.cc
@@ -20,8 +20,8 @@
#include <iostream>
#include <fstream>
+#include <log/log_messages.h>
#include <log/message_exception.h>
-#include <log/messagedef.h>
#include <log/message_reader.h>
#include <util/strutil.h>
@@ -48,7 +48,7 @@ MessageReader::readFile(const string& file, MessageReader::Mode mode) {
// Open the file.
ifstream infile(file.c_str());
if (infile.fail()) {
- throw MessageException(MSG_OPENIN, file, strerror(errno));
+ throw MessageException(LOG_INPUT_OPEN_FAIL, file, strerror(errno));
}
// Loop round reading it. As we process the file one line at a time,
@@ -65,7 +65,7 @@ MessageReader::readFile(const string& file, MessageReader::Mode mode) {
// Why did the loop terminate?
if (!infile.eof()) {
- throw MessageException(MSG_READERR, file, strerror(errno));
+ throw MessageException(LOG_READ_ERROR, file, strerror(errno));
}
infile.close();
}
@@ -114,7 +114,7 @@ MessageReader::parseDirective(const std::string& text) {
} else {
// Unrecognised directive
- throw MessageException(MSG_UNRECDIR, tokens[0], lineno_);
+ throw MessageException(LOG_UNRECOGNISED_DIRECTIVE, tokens[0], lineno_);
}
}
@@ -138,13 +138,13 @@ MessageReader::parsePrefix(const vector<string>& tokens) {
// and numeric characters (and underscores) and does not start with a
// digit.
if (invalidSymbol(prefix_)) {
- throw MessageException(MSG_PRFINVARG, prefix_, lineno_);
+ throw MessageException(LOG_PREFIX_INVALID_ARG, prefix_, lineno_);
}
} else {
// Too many arguments
- throw MessageException(MSG_PRFEXTRARG, lineno_);
+ throw MessageException(LOG_PREFIX_EXTRA_ARGS, lineno_);
}
}
@@ -172,10 +172,10 @@ MessageReader::parseNamespace(const vector<string>& tokens) {
// Check argument count
if (tokens.size() < 2) {
- throw MessageException(MSG_NSNOARG, lineno_);
+ throw MessageException(LOG_NAMESPACE_NO_ARGS, lineno_);
} else if (tokens.size() > 2) {
- throw MessageException(MSG_NSEXTRARG, lineno_);
+ throw MessageException(LOG_NAMESPACE_EXTRA_ARGS, lineno_);
}
@@ -187,12 +187,12 @@ MessageReader::parseNamespace(const vector<string>& tokens) {
"abcdefghijklmnopqrstuvwxyz"
"0123456789_:";
if (tokens[1].find_first_not_of(valid_chars) != string::npos) {
- throw MessageException(MSG_NSINVARG, tokens[1], lineno_);
+ throw MessageException(LOG_NAMESPACE_INVALID_ARG, tokens[1], lineno_);
}
// All OK - unless the namespace has already been set.
if (ns_.size() != 0) {
- throw MessageException(MSG_DUPLNS, lineno_);
+ throw MessageException(LOG_DUPLICATE_NAMESPACE, lineno_);
}
// Prefix has not been set, so set it and return success.
@@ -219,7 +219,7 @@ MessageReader::parseMessage(const std::string& text, MessageReader::Mode mode) {
// A line comprising just the message introducer is not valid.
if (text.size() == 1) {
- throw MessageException(MSG_NOMSGID, text, lineno_);
+ throw MessageException(LOG_NO_MESSAGE_ID, text, lineno_);
}
// Strip off the introducer and any leading space after that.
@@ -230,7 +230,7 @@ MessageReader::parseMessage(const std::string& text, MessageReader::Mode mode) {
if (first_delim == string::npos) {
// Just a single token in the line - this is not valid
- throw MessageException(MSG_NOMSGTXT, message_line, lineno_);
+ throw MessageException(LOG_NO_MESSAGE_TEXT, message_line, lineno_);
}
// Extract the first token into the message ID, preceding it with the
@@ -240,7 +240,7 @@ MessageReader::parseMessage(const std::string& text, MessageReader::Mode mode) {
string ident = prefix_ + message_line.substr(0, first_delim);
if (prefix_.empty()) {
if (invalidSymbol(ident)) {
- throw MessageException(MSG_INVMSGID, ident, lineno_);
+ throw MessageException(LOG_INVALID_MESSAGE_ID, ident, lineno_);
}
}
isc::util::str::uppercase(ident);
@@ -252,7 +252,7 @@ MessageReader::parseMessage(const std::string& text, MessageReader::Mode mode) {
// ?? This happens if there are trailing delimiters, which should not
// occur as we have stripped trailing spaces off the line. Just treat
// this as a single-token error for simplicity's sake.
- throw MessageException(MSG_NOMSGTXT, message_line, lineno_);
+ throw MessageException(LOG_NO_MESSAGE_TEXT, message_line, lineno_);
}
// Add the result to the dictionary and to the non-added list if the add to
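
The renamed identifiers above are what a caller sees when a message file cannot be processed: readFile() throws a MessageException carrying one of the LOG_* IDs plus its arguments. The rough sketch below drives the reader directly; the dictionary-pointer constructor and the ADD mode enumerator are assumptions about the reader's interface rather than something shown in this diff.

    #include <iostream>

    #include <log/log_messages.h>
    #include <log/message_dictionary.h>
    #include <log/message_exception.h>
    #include <log/message_reader.h>

    using namespace isc::log;

    int
    main() {
        MessageDictionary dictionary;
        MessageReader reader(&dictionary);   // assumed: parsed messages go here
        try {
            // ADD mode (assumed): new definitions are added, existing IDs kept.
            reader.readFile("example.mes", MessageReader::ADD);
        } catch (const MessageException&) {
            // The exception identifies the problem by one of the LOG_* IDs used
            // above, e.g. LOG_UNRECOGNISED_DIRECTIVE or LOG_NO_MESSAGE_TEXT.
            std::cerr << "failed to read example.mes" << std::endl;
            return (1);
        }
        return (0);
    }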
diff --git a/src/lib/log/messagedef.cc b/src/lib/log/messagedef.cc
deleted file mode 100644
index 853722a..0000000
--- a/src/lib/log/messagedef.cc
+++ /dev/null
@@ -1,63 +0,0 @@
-// File created from messagedef.mes on Fri May 27 14:49:45 2011
-
-#include <cstddef>
-#include <log/message_types.h>
-#include <log/message_initializer.h>
-
-namespace isc {
-namespace log {
-
-extern const isc::log::MessageID MSG_BADDESTINATION = "MSG_BADDESTINATION";
-extern const isc::log::MessageID MSG_BADSEVERITY = "MSG_BADSEVERITY";
-extern const isc::log::MessageID MSG_BADSTREAM = "MSG_BADSTREAM";
-extern const isc::log::MessageID MSG_DUPLNS = "MSG_DUPLNS";
-extern const isc::log::MessageID MSG_DUPMSGID = "MSG_DUPMSGID";
-extern const isc::log::MessageID MSG_IDNOTFND = "MSG_IDNOTFND";
-extern const isc::log::MessageID MSG_INVMSGID = "MSG_INVMSGID";
-extern const isc::log::MessageID MSG_NOMSGID = "MSG_NOMSGID";
-extern const isc::log::MessageID MSG_NOMSGTXT = "MSG_NOMSGTXT";
-extern const isc::log::MessageID MSG_NSEXTRARG = "MSG_NSEXTRARG";
-extern const isc::log::MessageID MSG_NSINVARG = "MSG_NSINVARG";
-extern const isc::log::MessageID MSG_NSNOARG = "MSG_NSNOARG";
-extern const isc::log::MessageID MSG_OPENIN = "MSG_OPENIN";
-extern const isc::log::MessageID MSG_OPENOUT = "MSG_OPENOUT";
-extern const isc::log::MessageID MSG_PRFEXTRARG = "MSG_PRFEXTRARG";
-extern const isc::log::MessageID MSG_PRFINVARG = "MSG_PRFINVARG";
-extern const isc::log::MessageID MSG_RDLOCMES = "MSG_RDLOCMES";
-extern const isc::log::MessageID MSG_READERR = "MSG_READERR";
-extern const isc::log::MessageID MSG_UNRECDIR = "MSG_UNRECDIR";
-extern const isc::log::MessageID MSG_WRITERR = "MSG_WRITERR";
-
-} // namespace log
-} // namespace isc
-
-namespace {
-
-const char* values[] = {
- "MSG_BADDESTINATION", "unrecognized log destination: %1",
- "MSG_BADSEVERITY", "unrecognized log severity: %1",
- "MSG_BADSTREAM", "bad log console output stream: %1",
- "MSG_DUPLNS", "line %1: duplicate $NAMESPACE directive found",
- "MSG_DUPMSGID", "duplicate message ID (%1) in compiled code",
- "MSG_IDNOTFND", "could not replace message text for '%1': no such message",
- "MSG_INVMSGID", "line %1: invalid message identification '%2'",
- "MSG_NOMSGID", "line %1: message definition line found without a message ID",
- "MSG_NOMSGTXT", "line %1: line found containing a message ID ('%2') and no text",
- "MSG_NSEXTRARG", "line %1: $NAMESPACE directive has too many arguments",
- "MSG_NSINVARG", "line %1: $NAMESPACE directive has an invalid argument ('%2')",
- "MSG_NSNOARG", "line %1: no arguments were given to the $NAMESPACE directive",
- "MSG_OPENIN", "unable to open message file %1 for input: %2",
- "MSG_OPENOUT", "unable to open %1 for output: %2",
- "MSG_PRFEXTRARG", "line %1: $PREFIX directive has too many arguments",
- "MSG_PRFINVARG", "line %1: $PREFIX directive has an invalid argument ('%2')",
- "MSG_RDLOCMES", "reading local message file %1",
- "MSG_READERR", "error reading from message file %1: %2",
- "MSG_UNRECDIR", "line %1: unrecognised directive '%2'",
- "MSG_WRITERR", "error writing to %1: %2",
- NULL
-};
-
-const isc::log::MessageInitializer initializer(values);
-
-} // Anonymous namespace
-
diff --git a/src/lib/log/messagedef.h b/src/lib/log/messagedef.h
deleted file mode 100644
index bdb1075..0000000
--- a/src/lib/log/messagedef.h
+++ /dev/null
@@ -1,35 +0,0 @@
-// File created from messagedef.mes on Fri May 27 14:49:45 2011
-
-#ifndef __MESSAGEDEF_H
-#define __MESSAGEDEF_H
-
-#include <log/message_types.h>
-
-namespace isc {
-namespace log {
-
-extern const isc::log::MessageID MSG_BADDESTINATION;
-extern const isc::log::MessageID MSG_BADSEVERITY;
-extern const isc::log::MessageID MSG_BADSTREAM;
-extern const isc::log::MessageID MSG_DUPLNS;
-extern const isc::log::MessageID MSG_DUPMSGID;
-extern const isc::log::MessageID MSG_IDNOTFND;
-extern const isc::log::MessageID MSG_INVMSGID;
-extern const isc::log::MessageID MSG_NOMSGID;
-extern const isc::log::MessageID MSG_NOMSGTXT;
-extern const isc::log::MessageID MSG_NSEXTRARG;
-extern const isc::log::MessageID MSG_NSINVARG;
-extern const isc::log::MessageID MSG_NSNOARG;
-extern const isc::log::MessageID MSG_OPENIN;
-extern const isc::log::MessageID MSG_OPENOUT;
-extern const isc::log::MessageID MSG_PRFEXTRARG;
-extern const isc::log::MessageID MSG_PRFINVARG;
-extern const isc::log::MessageID MSG_RDLOCMES;
-extern const isc::log::MessageID MSG_READERR;
-extern const isc::log::MessageID MSG_UNRECDIR;
-extern const isc::log::MessageID MSG_WRITERR;
-
-} // namespace log
-} // namespace isc
-
-#endif // __MESSAGEDEF_H
diff --git a/src/lib/log/messagedef.mes b/src/lib/log/messagedef.mes
deleted file mode 100644
index a54931b..0000000
--- a/src/lib/log/messagedef.mes
+++ /dev/null
@@ -1,131 +0,0 @@
-# Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
-#
-# Permission to use, copy, modify, and/or distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
-# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
-# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
-# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
-# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
-# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
-# PERFORMANCE OF THIS SOFTWARE.
-
-# \brief Message Utility Message File
-#
-# This is the source of the set of messages generated by the message and
-# logging components. The associated .h and .cc files are created by hand from
-# this file though and are not built during the build process; this is to avoid
-# the chicken-and-egg situation where we need the files to build the message
-# compiler, yet we need the compiler to build the files.
-
-$PREFIX MSG_
-$NAMESPACE isc::log
-
-% DUPMSGID duplicate message ID (%1) in compiled code
-Indicative of a programming error, when it started up, BIND10 detected that
-the given message ID had been registered by one or more modules. (All message
-IDs should be unique throughout BIND10.) This has no impact on the operation
-of the server other that erroneous messages may be logged. (When BIND10 loads
-the message IDs (and their associated text), if a duplicate ID is found it is
-discarded. However, when the module that supplied the duplicate ID logs that
-particular message, the text supplied by the module that added the original
-ID will be output - something that may bear no relation to the condition being
-logged.
-
-% DUPLNS line %1: duplicate $NAMESPACE directive found
-When reading a message file, more than one $NAMESPACE directive was found. In
-this version of the code, such a condition is regarded as an error and the
-read will be abandoned.
-
-% IDNOTFND could not replace message text for '%1': no such message
-During start-up a local message file was read. A line with the listed
-message identification was found in the file, but the identification is not
-one contained in the compiled-in message dictionary. Either the message
-identification has been mis-spelled in the file, or the local file was used
-for an earlier version of the software and the message with that
-identification has been removed.
-
-This message may appear a number of times in the file, once for every such
-unknown message identification.
-
-% INVMSGID line %1: invalid message identification '%2'
-The concatenation of the prefix and the message identification is used as
-a symbol in the C++ module; as such it may only contain
-
-% NOMSGID line %1: message definition line found without a message ID
-Message definition lines are lines starting with a "%". The rest of the line
-should comprise the message ID and text describing the message. This error
-indicates the message compiler found a line in the message file comprising
-just the "%" and nothing else.
-
-% NOMSGTXT line %1: line found containing a message ID ('%2') and no text
-Message definition lines are lines starting with a "%". The rest of the line
-should comprise the message ID and text describing the message. This error
-is generated when a line is found in the message file that contains the
-leading "%" and the message identification but no text.
-
-% NSEXTRARG line %1: $NAMESPACE directive has too many arguments
-The $NAMESPACE directive takes a single argument, a namespace in which all the
-generated symbol names are placed. This error is generated when the
-compiler finds a $NAMESPACE directive with more than one argument.
-
-% NSINVARG line %1: $NAMESPACE directive has an invalid argument ('%2')
-The $NAMESPACE argument should be a valid C++ namespace. The reader does a
-cursory check on its validity, checking that the characters in the namespace
-are correct. The error is generated when the reader finds an invalid
-character. (Valid are alphanumeric characters, underscores and colons.)
-
-% NSNOARG line %1: no arguments were given to the $NAMESPACE directive
-The $NAMESPACE directive takes a single argument, a namespace in which all the
-generated symbol names are placed. This error is generated when the
-compiler finds a $NAMESPACE directive with no arguments.
-
-% OPENIN unable to open message file %1 for input: %2
-The program was not able to open the specified input message file for the
-reason given.
-
-% OPENOUT unable to open %1 for output: %2
-The program was not able to open the specified output file for the reason
-given.
-
-% PRFEXTRARG line %1: $PREFIX directive has too many arguments
-The $PREFIX directive takes a single argument, a prefix to be added to the
-symbol names when a C++ .h file is created. This error is generated when the
-compiler finds a $PREFIX directive with more than one argument.
-
-% PRFINVARG line %1: $PREFIX directive has an invalid argument ('%2')
-The $PREFIX argument is used in a symbol name in a C++ header file. As such,
-it must adhere to restrictions on C++ symbol names (e.g. may only contain
-alphanumeric characters or underscores, and may nor start with a digit).
-A $PREFIX directive was found with an argument (given in the message) that
-violates those restictions.
-
-% RDLOCMES reading local message file %1
-This is an informational message output by BIND10 when it starts to read a
-local message file. (A local message file may replace the text of one of more
-messages; the ID of the message will not be changed though.)
-
-% READERR error reading from message file %1: %2
-The specified error was encountered reading from the named message file.
-
-% WRITERR error writing to %1: %2
-The specified error was encountered by the message compiler when writing to
-the named output file.
-
-% UNRECDIR line %1: unrecognised directive '%2'
-A line starting with a dollar symbol was found, but the first word on the line
-(shown in the message) was not a recognised message compiler directive.
-
-% BADSEVERITY unrecognized log severity: %1
-A logger severity value was given that was not recognized. The severity
-should be one of "DEBUG", "INFO", "WARN", "ERROR", or "FATAL".
-
-% BADDESTINATION unrecognized log destination: %1
-A logger destination value was given that was not recognized. The
-destination should be one of "console", "file", or "syslog".
-
-% BADSTREAM bad log console output stream: %1
-A log console output stream was given that was not recognized. The
-output stream should be one of "stdout", or "stderr"
diff --git a/src/lib/log/output_option.cc b/src/lib/log/output_option.cc
index 191631d..f56efb9 100644
--- a/src/lib/log/output_option.cc
+++ b/src/lib/log/output_option.cc
@@ -13,12 +13,13 @@
// PERFORMANCE OF THIS SOFTWARE.
#include <string>
-#include <log/output_option.h>
-#include <log/macros.h>
-#include <log/messagedef.h>
#include <boost/algorithm/string.hpp>
+#include <log/log_messages.h>
+#include <log/macros.h>
+#include <log/output_option.h>
+
namespace isc {
namespace log {
@@ -32,7 +33,7 @@ getDestination(const std::string& dest_str) {
return OutputOption::DEST_SYSLOG;
} else {
Logger logger("log");
- LOG_ERROR(logger, MSG_BADDESTINATION).arg(dest_str);
+ LOG_ERROR(logger, LOG_BAD_DESTINATION).arg(dest_str);
return OutputOption::DEST_CONSOLE;
}
}
@@ -45,7 +46,7 @@ getStream(const std::string& stream_str) {
return OutputOption::STR_STDOUT;
} else {
Logger logger("log");
- LOG_ERROR(logger, MSG_BADSTREAM).arg(stream_str);
+ LOG_ERROR(logger, LOG_BAD_STREAM).arg(stream_str);
return OutputOption::STR_STDOUT;
}
}
diff --git a/src/lib/log/tests/destination_test.sh.in b/src/lib/log/tests/destination_test.sh.in
index e02141d..41a52ee 100755
--- a/src/lib/log/tests/destination_test.sh.in
+++ b/src/lib/log/tests/destination_test.sh.in
@@ -37,10 +37,10 @@ passfail() {
echo "1. One logger, multiple destinations:"
cat > $tempfile << .
-FATAL [example] MSG_WRITERR error writing to test1: 42
-ERROR [example] MSG_RDLOCMES reading local message file dummy/file
-FATAL [example.beta] MSG_BADSEVERITY unrecognized log severity: beta_fatal
-ERROR [example.beta] MSG_BADDESTINATION unrecognized log destination: beta_error
+FATAL [example] LOG_WRITE_ERROR error writing to test1: 42
+ERROR [example] LOG_READING_LOCAL_FILE reading local message file dummy/file
+FATAL [example.beta] LOG_BAD_SEVERITY unrecognized log severity: beta_fatal
+ERROR [example.beta] LOG_BAD_DESTINATION unrecognized log destination: beta_error
.
rm -f $destfile1 $destfile2
./logger_example -s error -f $destfile1 -f $destfile2
@@ -61,13 +61,13 @@ rm -f $destfile1 $destfile2
# Output for example.alpha should have done to destfile2.
cat > $tempfile << .
-FATAL [example] MSG_WRITERR error writing to test1: 42
-ERROR [example] MSG_RDLOCMES reading local message file dummy/file
-WARN [example] MSG_BADSTREAM bad log console output stream: example
-FATAL [example.beta] MSG_BADSEVERITY unrecognized log severity: beta_fatal
-ERROR [example.beta] MSG_BADDESTINATION unrecognized log destination: beta_error
-WARN [example.beta] MSG_BADSTREAM bad log console output stream: beta_warn
-INFO [example.beta] MSG_READERR error reading from message file beta: info
+FATAL [example] LOG_WRITE_ERROR error writing to test1: 42
+ERROR [example] LOG_READING_LOCAL_FILE reading local message file dummy/file
+WARN [example] LOG_BAD_STREAM bad log console output stream: example
+FATAL [example.beta] LOG_BAD_SEVERITY unrecognized log severity: beta_fatal
+ERROR [example.beta] LOG_BAD_DESTINATION unrecognized log destination: beta_error
+WARN [example.beta] LOG_BAD_STREAM bad log console output stream: beta_warn
+INFO [example.beta] LOG_READ_ERROR error reading from message file beta: info
.
echo -n " - destination 1:"
cut -d' ' -f3- $destfile1 | diff $tempfile -
@@ -75,7 +75,7 @@ passfail $?
echo -n " - destination 2:"
cat > $tempfile << .
-WARN [example.alpha] MSG_READERR error reading from message file a.txt: dummy reason
+WARN [example.alpha] LOG_READ_ERROR error reading from message file a.txt: dummy reason
.
cut -d' ' -f3- $destfile2 | diff $tempfile -
passfail $?
diff --git a/src/lib/log/tests/local_file_test.sh.in b/src/lib/log/tests/local_file_test.sh.in
index 4308f96..d76f48f 100755
--- a/src/lib/log/tests/local_file_test.sh.in
+++ b/src/lib/log/tests/local_file_test.sh.in
@@ -37,35 +37,35 @@ passfail() {
# Create the local message file for testing
cat > $localmes << .
-% MSG_NOTHERE this message is not in the global dictionary
-% MSG_READERR replacement read error, parameters: '%1' and '%2'
-% MSG_RDLOCMES replacement read local message file, parameter is '%1'
+% LOG_NOTHERE this message is not in the global dictionary
+% LOG_READ_ERROR replacement read error, parameters: '%1' and '%2'
+% LOG_READING_LOCAL_FILE replacement read local message file, parameter is '%1'
.
echo -n "1. Local message replacement:"
cat > $tempfile << .
-WARN [example.log] MSG_IDNOTFND could not replace message text for 'MSG_NOTHERE': no such message
-FATAL [example] MSG_WRITERR error writing to test1: 42
-ERROR [example] MSG_RDLOCMES replacement read local message file, parameter is 'dummy/file'
-WARN [example] MSG_BADSTREAM bad log console output stream: example
-WARN [example.alpha] MSG_READERR replacement read error, parameters: 'a.txt' and 'dummy reason'
-FATAL [example.beta] MSG_BADSEVERITY unrecognized log severity: beta_fatal
-ERROR [example.beta] MSG_BADDESTINATION unrecognized log destination: beta_error
-WARN [example.beta] MSG_BADSTREAM bad log console output stream: beta_warn
+WARN [example.log] LOG_NO_SUCH_MESSAGE could not replace message text for 'LOG_NOTHERE': no such message
+FATAL [example] LOG_WRITE_ERROR error writing to test1: 42
+ERROR [example] LOG_READING_LOCAL_FILE replacement read local message file, parameter is 'dummy/file'
+WARN [example] LOG_BAD_STREAM bad log console output stream: example
+WARN [example.alpha] LOG_READ_ERROR replacement read error, parameters: 'a.txt' and 'dummy reason'
+FATAL [example.beta] LOG_BAD_SEVERITY unrecognized log severity: beta_fatal
+ERROR [example.beta] LOG_BAD_DESTINATION unrecognized log destination: beta_error
+WARN [example.beta] LOG_BAD_STREAM bad log console output stream: beta_warn
.
./logger_example -c stdout -s warn $localmes | cut -d' ' -f3- | diff $tempfile -
passfail $?
echo -n "2. Report error if unable to read local message file:"
cat > $tempfile << .
-ERROR [example.log] MSG_OPENIN unable to open message file $localmes for input: No such file or directory
-FATAL [example] MSG_WRITERR error writing to test1: 42
-ERROR [example] MSG_RDLOCMES reading local message file dummy/file
-WARN [example] MSG_BADSTREAM bad log console output stream: example
-WARN [example.alpha] MSG_READERR error reading from message file a.txt: dummy reason
-FATAL [example.beta] MSG_BADSEVERITY unrecognized log severity: beta_fatal
-ERROR [example.beta] MSG_BADDESTINATION unrecognized log destination: beta_error
-WARN [example.beta] MSG_BADSTREAM bad log console output stream: beta_warn
+ERROR [example.log] LOG_INPUT_OPEN_FAIL unable to open message file $localmes for input: No such file or directory
+FATAL [example] LOG_WRITE_ERROR error writing to test1: 42
+ERROR [example] LOG_READING_LOCAL_FILE reading local message file dummy/file
+WARN [example] LOG_BAD_STREAM bad log console output stream: example
+WARN [example.alpha] LOG_READ_ERROR error reading from message file a.txt: dummy reason
+FATAL [example.beta] LOG_BAD_SEVERITY unrecognized log severity: beta_fatal
+ERROR [example.beta] LOG_BAD_DESTINATION unrecognized log destination: beta_error
+WARN [example.beta] LOG_BAD_STREAM bad log console output stream: beta_warn
.
rm -f $localmes
./logger_example -c stdout -s warn $localmes | cut -d' ' -f3- | diff $tempfile -
diff --git a/src/lib/log/tests/logger_example.cc b/src/lib/log/tests/logger_example.cc
index 6b43c18..2170066 100644
--- a/src/lib/log/tests/logger_example.cc
+++ b/src/lib/log/tests/logger_example.cc
@@ -40,7 +40,7 @@
#include <log/macros.h>
// Include a set of message definitions.
-#include <log/messagedef.h>
+#include <log/log_messages.h>
using namespace isc::log;
using namespace std;
@@ -285,21 +285,21 @@ int main(int argc, char** argv) {
isc::log::Logger logger_alpha("alpha");
isc::log::Logger logger_beta("beta");
- LOG_FATAL(logger_ex, MSG_WRITERR).arg("test1").arg("42");
- LOG_ERROR(logger_ex, MSG_RDLOCMES).arg("dummy/file");
- LOG_WARN(logger_ex, MSG_BADSTREAM).arg("example");
- LOG_WARN(logger_alpha, MSG_READERR).arg("a.txt").arg("dummy reason");
- LOG_INFO(logger_alpha, MSG_OPENIN).arg("example.msg").arg("dummy reason");
- LOG_DEBUG(logger_ex, 0, MSG_RDLOCMES).arg("example/0");
- LOG_DEBUG(logger_ex, 24, MSG_RDLOCMES).arg("example/24");
- LOG_DEBUG(logger_ex, 25, MSG_RDLOCMES).arg("example/25");
- LOG_DEBUG(logger_ex, 26, MSG_RDLOCMES).arg("example/26");
- LOG_FATAL(logger_beta, MSG_BADSEVERITY).arg("beta_fatal");
- LOG_ERROR(logger_beta, MSG_BADDESTINATION).arg("beta_error");
- LOG_WARN(logger_beta, MSG_BADSTREAM).arg("beta_warn");
- LOG_INFO(logger_beta, MSG_READERR).arg("beta").arg("info");
- LOG_DEBUG(logger_beta, 25, MSG_BADSEVERITY).arg("beta/25");
- LOG_DEBUG(logger_beta, 26, MSG_BADSEVERITY).arg("beta/26");
+ LOG_FATAL(logger_ex, LOG_WRITE_ERROR).arg("test1").arg("42");
+ LOG_ERROR(logger_ex, LOG_READING_LOCAL_FILE).arg("dummy/file");
+ LOG_WARN(logger_ex, LOG_BAD_STREAM).arg("example");
+ LOG_WARN(logger_alpha, LOG_READ_ERROR).arg("a.txt").arg("dummy reason");
+ LOG_INFO(logger_alpha, LOG_INPUT_OPEN_FAIL).arg("example.msg").arg("dummy reason");
+ LOG_DEBUG(logger_ex, 0, LOG_READING_LOCAL_FILE).arg("example/0");
+ LOG_DEBUG(logger_ex, 24, LOG_READING_LOCAL_FILE).arg("example/24");
+ LOG_DEBUG(logger_ex, 25, LOG_READING_LOCAL_FILE).arg("example/25");
+ LOG_DEBUG(logger_ex, 26, LOG_READING_LOCAL_FILE).arg("example/26");
+ LOG_FATAL(logger_beta, LOG_BAD_SEVERITY).arg("beta_fatal");
+ LOG_ERROR(logger_beta, LOG_BAD_DESTINATION).arg("beta_error");
+ LOG_WARN(logger_beta, LOG_BAD_STREAM).arg("beta_warn");
+ LOG_INFO(logger_beta, LOG_READ_ERROR).arg("beta").arg("info");
+ LOG_DEBUG(logger_beta, 25, LOG_BAD_SEVERITY).arg("beta/25");
+ LOG_DEBUG(logger_beta, 26, LOG_BAD_SEVERITY).arg("beta/26");
return (0);
}
diff --git a/src/lib/log/tests/logger_level_unittest.cc b/src/lib/log/tests/logger_level_unittest.cc
index 13d33b2..8c98091 100644
--- a/src/lib/log/tests/logger_level_unittest.cc
+++ b/src/lib/log/tests/logger_level_unittest.cc
@@ -19,8 +19,8 @@
#include <log/logger.h>
#include <log/logger_manager.h>
+#include <log/log_messages.h>
#include <log/logger_name.h>
-#include <log/messagedef.h>
using namespace isc;
using namespace isc::log;
diff --git a/src/lib/log/tests/logger_manager_unittest.cc b/src/lib/log/tests/logger_manager_unittest.cc
index 115c928..0bdfc74 100644
--- a/src/lib/log/tests/logger_manager_unittest.cc
+++ b/src/lib/log/tests/logger_manager_unittest.cc
@@ -27,7 +27,7 @@
#include <exceptions/exceptions.h>
#include <log/macros.h>
-#include <log/messagedef.h>
+#include <log/log_messages.h>
#include <log/logger.h>
#include <log/logger_level.h>
#include <log/logger_manager.h>
@@ -203,16 +203,16 @@ TEST_F(LoggerManagerTest, FileLogger) {
// keep the file open.
Logger logger(file_spec.getLoggerName());
- LOG_FATAL(logger, MSG_DUPMSGID).arg("test");
- ids.push_back(MSG_DUPMSGID);
+ LOG_FATAL(logger, LOG_DUPLICATE_MESSAGE_ID).arg("test");
+ ids.push_back(LOG_DUPLICATE_MESSAGE_ID);
- LOG_FATAL(logger, MSG_DUPLNS).arg("test");
- ids.push_back(MSG_DUPLNS);
+ LOG_FATAL(logger, LOG_DUPLICATE_NAMESPACE).arg("test");
+ ids.push_back(LOG_DUPLICATE_NAMESPACE);
}
LoggerManager::reset();
// At this point, the output file should contain two lines with messages
- // MSG_DUPMSGID and MSG_DUPLNS messages - test this.
+ // LOG_DUPLICATE_MESSAGE_ID and LOG_DUPLICATE_NAMESPACE messages - test this.
checkFileContents(file_spec.getFileName(), ids.begin(), ids.end());
// Re-open the file (we have to assume that it was closed when we
@@ -225,14 +225,14 @@ TEST_F(LoggerManagerTest, FileLogger) {
// Create a new instance of the logger and log three more messages.
Logger logger(file_spec.getLoggerName());
- LOG_FATAL(logger, MSG_IDNOTFND).arg("test");
- ids.push_back(MSG_IDNOTFND);
+ LOG_FATAL(logger, LOG_NO_SUCH_MESSAGE).arg("test");
+ ids.push_back(LOG_NO_SUCH_MESSAGE);
- LOG_FATAL(logger, MSG_INVMSGID).arg("test").arg("test2");
- ids.push_back(MSG_INVMSGID);
+ LOG_FATAL(logger, LOG_INVALID_MESSAGE_ID).arg("test").arg("test2");
+ ids.push_back(LOG_INVALID_MESSAGE_ID);
- LOG_FATAL(logger, MSG_NOMSGID).arg("42");
- ids.push_back(MSG_NOMSGID);
+ LOG_FATAL(logger, LOG_NO_MESSAGE_ID).arg("42");
+ ids.push_back(LOG_NO_MESSAGE_ID);
// Close the file and check again
LoggerManager::reset();
@@ -276,19 +276,19 @@ TEST_F(LoggerManagerTest, FileSizeRollover) {
// be rolled after the message is logged.
{
Logger logger(file_spec.getLoggerName());
- LOG_FATAL(logger, MSG_IDNOTFND).arg(big_arg);
- LOG_FATAL(logger, MSG_DUPLNS).arg(big_arg);
+ LOG_FATAL(logger, LOG_NO_SUCH_MESSAGE).arg(big_arg);
+ LOG_FATAL(logger, LOG_DUPLICATE_NAMESPACE).arg(big_arg);
}
// Check them.
LoggerManager::reset(); // Ensure files are closed
vector<MessageID> ids;
- ids.push_back(MSG_IDNOTFND);
+ ids.push_back(LOG_NO_SUCH_MESSAGE);
checkFileContents(prev_name[1], ids.begin(), ids.end());
ids.clear();
- ids.push_back(MSG_DUPLNS);
+ ids.push_back(LOG_DUPLICATE_NAMESPACE);
checkFileContents(prev_name[0], ids.begin(), ids.end());
// Log another message and check that the files have rotated and that
@@ -296,18 +296,18 @@ TEST_F(LoggerManagerTest, FileSizeRollover) {
manager.process(spec);
{
Logger logger(file_spec.getLoggerName());
- LOG_FATAL(logger, MSG_NOMSGTXT).arg(big_arg);
+ LOG_FATAL(logger, LOG_NO_MESSAGE_TEXT).arg(big_arg);
}
LoggerManager::reset(); // Ensure files are closed
// Check that the files have moved.
ids.clear();
- ids.push_back(MSG_DUPLNS);
+ ids.push_back(LOG_DUPLICATE_NAMESPACE);
checkFileContents(prev_name[1], ids.begin(), ids.end());
ids.clear();
- ids.push_back(MSG_NOMSGTXT);
+ ids.push_back(LOG_NO_MESSAGE_TEXT);
checkFileContents(prev_name[0], ids.begin(), ids.end());
// ... and check that the .3 version does not exist.
diff --git a/src/lib/log/tests/logger_support_unittest.cc b/src/lib/log/tests/logger_support_unittest.cc
index 7e5d23a..6a93652 100644
--- a/src/lib/log/tests/logger_support_unittest.cc
+++ b/src/lib/log/tests/logger_support_unittest.cc
@@ -14,7 +14,7 @@
#include <gtest/gtest.h>
#include <log/logger_support.h>
-#include <log/messagedef.h>
+#include <log/log_messages.h>
using namespace isc::log;
@@ -63,10 +63,10 @@ TEST(LoggerSupportTest, LoggingInitializationCheck) {
isc::log::Logger test_logger("test");
EXPECT_THROW(test_logger.isDebugEnabled(), LoggingNotInitialized);
- EXPECT_THROW(test_logger.info(MSG_OPENIN), LoggingNotInitialized);
+ EXPECT_THROW(test_logger.info(LOG_INPUT_OPEN_FAIL), LoggingNotInitialized);
// ... and check that they work when logging is initialized.
setLoggingInitialized(true);
EXPECT_NO_THROW(test_logger.isDebugEnabled());
- EXPECT_NO_THROW(test_logger.info(MSG_OPENIN));
+ EXPECT_NO_THROW(test_logger.info(LOG_INPUT_OPEN_FAIL));
}
diff --git a/src/lib/log/tests/logger_unittest.cc b/src/lib/log/tests/logger_unittest.cc
index b7858e5..edca9ce 100644
--- a/src/lib/log/tests/logger_unittest.cc
+++ b/src/lib/log/tests/logger_unittest.cc
@@ -20,7 +20,7 @@
#include <log/logger.h>
#include <log/logger_manager.h>
#include <log/logger_name.h>
-#include <log/messagedef.h>
+#include <log/log_messages.h>
using namespace isc;
using namespace isc::log;
diff --git a/src/lib/log/tests/message_dictionary_unittest.cc b/src/lib/log/tests/message_dictionary_unittest.cc
index ba33820..394fea0 100644
--- a/src/lib/log/tests/message_dictionary_unittest.cc
+++ b/src/lib/log/tests/message_dictionary_unittest.cc
@@ -29,7 +29,7 @@ using namespace std;
// and the latter should be present.
static const char* values[] = {
- "MSG_DUPLNS", "duplicate $NAMESPACE directive found",
+ "LOG_DUPLICATE_NAMESPACE", "duplicate $NAMESPACE directive found",
"NEWSYM", "new symbol added",
NULL
};
@@ -190,7 +190,7 @@ TEST_F(MessageDictionaryTest, GlobalTest) {
TEST_F(MessageDictionaryTest, GlobalLoadTest) {
vector<string>& duplicates = MessageInitializer::getDuplicates();
ASSERT_EQ(1, duplicates.size());
- EXPECT_EQ(string("MSG_DUPLNS"), duplicates[0]);
+ EXPECT_EQ(string("LOG_DUPLICATE_NAMESPACE"), duplicates[0]);
string text = MessageDictionary::globalDictionary().getText("NEWSYM");
EXPECT_EQ(string("new symbol added"), text);
diff --git a/src/lib/log/tests/message_reader_unittest.cc b/src/lib/log/tests/message_reader_unittest.cc
index 7b3ba5f..d0214a4 100644
--- a/src/lib/log/tests/message_reader_unittest.cc
+++ b/src/lib/log/tests/message_reader_unittest.cc
@@ -16,7 +16,7 @@
#include <string>
#include <gtest/gtest.h>
-#include <log/messagedef.h>
+#include <log/log_messages.h>
#include <log/message_dictionary.h>
#include <log/message_exception.h>
#include <log/message_reader.h>
@@ -102,8 +102,8 @@ processLineException(MessageReader& reader, const char* what,
TEST_F(MessageReaderTest, InvalidDirectives) {
// Check that a "$" with nothing else generates an error
- processLineException(reader_, "$", MSG_UNRECDIR);
- processLineException(reader_, "$xyz", MSG_UNRECDIR);
+ processLineException(reader_, "$", LOG_UNRECOGNISED_DIRECTIVE);
+ processLineException(reader_, "$xyz", LOG_UNRECOGNISED_DIRECTIVE);
}
// Check that it can parse a prefix
@@ -117,20 +117,20 @@ TEST_F(MessageReaderTest, Prefix) {
EXPECT_NO_THROW(reader_.processLine("$PREFIX"));
// Check a $PREFIX with multiple arguments is invalid
- processLineException(reader_, "$prefix A B", MSG_PRFEXTRARG);
+ processLineException(reader_, "$prefix A B", LOG_PREFIX_EXTRA_ARGS);
// Prefixes should be alphanumeric (with underscores) and not start
// with a number.
- processLineException(reader_, "$prefix ab[cd", MSG_PRFINVARG);
- processLineException(reader_, "$prefix 123", MSG_PRFINVARG);
- processLineException(reader_, "$prefix 1ABC", MSG_PRFINVARG);
+ processLineException(reader_, "$prefix ab[cd", LOG_PREFIX_INVALID_ARG);
+ processLineException(reader_, "$prefix 123", LOG_PREFIX_INVALID_ARG);
+ processLineException(reader_, "$prefix 1ABC", LOG_PREFIX_INVALID_ARG);
// A valid prefix should be accepted
EXPECT_NO_THROW(reader_.processLine("$PREFIX dlm__"));
EXPECT_EQ(string("dlm__"), reader_.getPrefix());
// And check that the parser fails on invalid prefixes...
- processLineException(reader_, "$prefix 1ABC", MSG_PRFINVARG);
+ processLineException(reader_, "$prefix 1ABC", LOG_PREFIX_INVALID_ARG);
// Check that we can clear the prefix as well
reader_.clearPrefix();
@@ -150,13 +150,13 @@ TEST_F(MessageReaderTest, Namespace) {
EXPECT_EQ(string(""), reader_.getNamespace());
// Check that a $NAMESPACE directive with no argument generates an error.
- processLineException(reader_, "$NAMESPACE", MSG_NSNOARG);
+ processLineException(reader_, "$NAMESPACE", LOG_NAMESPACE_NO_ARGS);
// Check a $NAMESPACE with multiple arguments is invalid
- processLineException(reader_, "$namespace A B", MSG_NSEXTRARG);
+ processLineException(reader_, "$namespace A B", LOG_NAMESPACE_EXTRA_ARGS);
// Namespaces should be alphanumeric (with underscores and colons)
- processLineException(reader_, "$namespace ab[cd", MSG_NSINVARG);
+ processLineException(reader_, "$namespace ab[cd", LOG_NAMESPACE_INVALID_ARG);
// A valid $NAMESPACE should be accepted
EXPECT_NO_THROW(reader_.processLine("$NAMESPACE isc"));
@@ -176,7 +176,7 @@ TEST_F(MessageReaderTest, Namespace) {
EXPECT_EQ(string("::"), reader_.getNamespace());
// ... and that another $NAMESPACE is rejected
- processLineException(reader_, "$NAMESPACE ABC", MSG_DUPLNS);
+ processLineException(reader_, "$NAMESPACE ABC", LOG_DUPLICATE_NAMESPACE);
}
// Check that it can parse a line
diff --git a/src/lib/log/tests/severity_test.sh.in b/src/lib/log/tests/severity_test.sh.in
index 0a304e0..124f36a 100755
--- a/src/lib/log/tests/severity_test.sh.in
+++ b/src/lib/log/tests/severity_test.sh.in
@@ -35,44 +35,44 @@ passfail() {
echo -n "1. runInitTest default parameters:"
cat > $tempfile << .
-FATAL [example] MSG_WRITERR error writing to test1: 42
-ERROR [example] MSG_RDLOCMES reading local message file dummy/file
-WARN [example] MSG_BADSTREAM bad log console output stream: example
-WARN [example.alpha] MSG_READERR error reading from message file a.txt: dummy reason
-INFO [example.alpha] MSG_OPENIN unable to open message file example.msg for input: dummy reason
-FATAL [example.beta] MSG_BADSEVERITY unrecognized log severity: beta_fatal
-ERROR [example.beta] MSG_BADDESTINATION unrecognized log destination: beta_error
-WARN [example.beta] MSG_BADSTREAM bad log console output stream: beta_warn
-INFO [example.beta] MSG_READERR error reading from message file beta: info
+FATAL [example] LOG_WRITE_ERROR error writing to test1: 42
+ERROR [example] LOG_READING_LOCAL_FILE reading local message file dummy/file
+WARN [example] LOG_BAD_STREAM bad log console output stream: example
+WARN [example.alpha] LOG_READ_ERROR error reading from message file a.txt: dummy reason
+INFO [example.alpha] LOG_INPUT_OPEN_FAIL unable to open message file example.msg for input: dummy reason
+FATAL [example.beta] LOG_BAD_SEVERITY unrecognized log severity: beta_fatal
+ERROR [example.beta] LOG_BAD_DESTINATION unrecognized log destination: beta_error
+WARN [example.beta] LOG_BAD_STREAM bad log console output stream: beta_warn
+INFO [example.beta] LOG_READ_ERROR error reading from message file beta: info
.
./logger_example -c stdout | cut -d' ' -f3- | diff $tempfile -
passfail $?
echo -n "2. Severity filter:"
cat > $tempfile << .
-FATAL [example] MSG_WRITERR error writing to test1: 42
-ERROR [example] MSG_RDLOCMES reading local message file dummy/file
-FATAL [example.beta] MSG_BADSEVERITY unrecognized log severity: beta_fatal
-ERROR [example.beta] MSG_BADDESTINATION unrecognized log destination: beta_error
+FATAL [example] LOG_WRITE_ERROR error writing to test1: 42
+ERROR [example] LOG_READING_LOCAL_FILE reading local message file dummy/file
+FATAL [example.beta] LOG_BAD_SEVERITY unrecognized log severity: beta_fatal
+ERROR [example.beta] LOG_BAD_DESTINATION unrecognized log destination: beta_error
.
./logger_example -c stdout -s error | cut -d' ' -f3- | diff $tempfile -
passfail $?
echo -n "3. Debug level:"
cat > $tempfile << .
-FATAL [example] MSG_WRITERR error writing to test1: 42
-ERROR [example] MSG_RDLOCMES reading local message file dummy/file
-WARN [example] MSG_BADSTREAM bad log console output stream: example
-WARN [example.alpha] MSG_READERR error reading from message file a.txt: dummy reason
-INFO [example.alpha] MSG_OPENIN unable to open message file example.msg for input: dummy reason
-DEBUG [example] MSG_RDLOCMES reading local message file example/0
-DEBUG [example] MSG_RDLOCMES reading local message file example/24
-DEBUG [example] MSG_RDLOCMES reading local message file example/25
-FATAL [example.beta] MSG_BADSEVERITY unrecognized log severity: beta_fatal
-ERROR [example.beta] MSG_BADDESTINATION unrecognized log destination: beta_error
-WARN [example.beta] MSG_BADSTREAM bad log console output stream: beta_warn
-INFO [example.beta] MSG_READERR error reading from message file beta: info
-DEBUG [example.beta] MSG_BADSEVERITY unrecognized log severity: beta/25
+FATAL [example] LOG_WRITE_ERROR error writing to test1: 42
+ERROR [example] LOG_READING_LOCAL_FILE reading local message file dummy/file
+WARN [example] LOG_BAD_STREAM bad log console output stream: example
+WARN [example.alpha] LOG_READ_ERROR error reading from message file a.txt: dummy reason
+INFO [example.alpha] LOG_INPUT_OPEN_FAIL unable to open message file example.msg for input: dummy reason
+DEBUG [example] LOG_READING_LOCAL_FILE reading local message file example/0
+DEBUG [example] LOG_READING_LOCAL_FILE reading local message file example/24
+DEBUG [example] LOG_READING_LOCAL_FILE reading local message file example/25
+FATAL [example.beta] LOG_BAD_SEVERITY unrecognized log severity: beta_fatal
+ERROR [example.beta] LOG_BAD_DESTINATION unrecognized log destination: beta_error
+WARN [example.beta] LOG_BAD_STREAM bad log console output stream: beta_warn
+INFO [example.beta] LOG_READ_ERROR error reading from message file beta: info
+DEBUG [example.beta] LOG_BAD_SEVERITY unrecognized log severity: beta/25
.
./logger_example -c stdout -s debug -d 25 | cut -d' ' -f3- | diff $tempfile -
passfail $?
diff --git a/src/lib/python/isc/notify/notify_out.py b/src/lib/python/isc/notify/notify_out.py
index c3b9939..4b25463 100644
--- a/src/lib/python/isc/notify/notify_out.py
+++ b/src/lib/python/isc/notify/notify_out.py
@@ -23,11 +23,11 @@ import errno
from isc.datasrc import sqlite3_ds
from isc.net import addr
import isc
-try:
- from pydnspp import *
-except ImportError as e:
- # C++ loadable module may not be installed;
- sys.stderr.write('[b10-xfrout] failed to import DNS or XFR module: %s\n' % str(e))
+try:
+ from pydnspp import *
+except ImportError as e:
+ # C++ loadable module may not be installed;
+ sys.stderr.write('[b10-xfrout] failed to import DNS or XFR module: %s\n' % str(e))
ZONE_NEW_DATA_READY_CMD = 'zone_new_data_ready'
_MAX_NOTIFY_NUM = 30
@@ -36,7 +36,6 @@ _EVENT_NONE = 0
_EVENT_READ = 1
_EVENT_TIMEOUT = 2
_NOTIFY_TIMEOUT = 1
-_IDLE_SLEEP_TIME = 0.5
# define the rcode for parsing notify reply message
_REPLY_OK = 0
@@ -55,10 +54,6 @@ class ZoneNotifyInfo:
'''This class keeps track of notify-out information for one zone.'''
def __init__(self, zone_name_, class_):
- '''notify_timeout_: absolute time for next notify reply. when the zone
- is preparing for sending notify message, notify_timeout_ is set to now,
- that means the first sending is triggered by the 'Timeout' mechanism.
- '''
self._notify_current = None
self._slave_index = 0
self._sock = None
@@ -67,9 +62,12 @@ class ZoneNotifyInfo:
self.zone_name = zone_name_
self.zone_class = class_
self.notify_msg_id = 0
+ # Absolute time for next notify reply. When the zone is preparing for
+ # sending notify message, notify_timeout_ is set to now, that means
+ # the first sending is triggered by the 'Timeout' mechanism.
self.notify_timeout = None
- self.notify_try_num = 0 #Notify times sending to one target.
-
+ self.notify_try_num = 0 # Notify times sending to one target.
+
def set_next_notify_target(self):
if self._slave_index < (len(self.notify_slaves) - 1):
self._slave_index += 1
@@ -104,9 +102,9 @@ class ZoneNotifyInfo:
class NotifyOut:
'''This class is used to handle notify logic for all zones(sending
- notify message to its slaves). notify service can be started by
+ notify message to its slaves). notify service can be started by
calling dispatcher(), and it can be stoped by calling shutdown()
- in another thread. '''
+ in another thread. '''
def __init__(self, datasrc_file, log=None, verbose=True):
self._notify_infos = {} # key is (zone_name, zone_class)
self._waiting_zones = []
@@ -120,12 +118,15 @@ class NotifyOut:
self._lock = threading.Lock()
self._db_file = datasrc_file
self._init_notify_out(datasrc_file)
+ # Use a non-blocking event to eliminate the busy loop:
+ # if there are no notifying zones, clear the event and wait.
+ self._nonblock_event = threading.Event()
def _init_notify_out(self, datasrc_file):
'''Get all the zones name and its notify target's address
- TODO, currently the zones are got by going through the zone
- table in database. There should be a better way to get them
- and also the setting 'also_notify', and there should be one
+ TODO, currently the zones are got by going through the zone
+ table in database. There should be a better way to get them
+ and also the setting 'also_notify', and there should be one
mechanism to cover the changed datasrc.'''
self._db_file = datasrc_file
for zone_name, zone_class in sqlite3_ds.get_zones_info(datasrc_file):
@@ -136,7 +137,7 @@ class NotifyOut:
self._notify_infos[zone_id].notify_slaves.append((item, 53))
def send_notify(self, zone_name, zone_class='IN'):
- '''Send notify to one zone's slaves, this function is
+ '''Send notify to one zone's slaves, this function is
the only interface for class NotifyOut which can be called
by other object.
Internally, the function only set the zone's notify-reply
@@ -160,6 +161,8 @@ class NotifyOut:
self._notify_infos[zone_id].prepare_notify_out()
self.notify_num += 1
self._notifying_zones.append(zone_id)
+ if not self._nonblock_event.isSet():
+ self._nonblock_event.set()
def _dispatcher(self, started_event):
started_event.set() # Let the master know we are alive already
@@ -178,8 +181,8 @@ class NotifyOut:
If one zone get the notify reply before timeout, call the
handle to process the reply. If one zone can't get the notify
- before timeout, call the handler to resend notify or notify
- next slave.
+ before timeout, call the handler to resend notify or notify
+ next slave.
The thread can be stopped by calling shutdown().
@@ -215,6 +218,9 @@ class NotifyOut:
# Ask it to stop
self._serving = False
+ if not self._nonblock_event.isSet():
+ # set self._nonblock_event to stop waiting for new notifying zones.
+ self._nonblock_event.set()
self._write_sock.send(SOCK_DATA) # make self._read_sock be readable.
# Wait for it
@@ -233,7 +239,7 @@ class NotifyOut:
then use the name in NS record rdata part to get the a/aaaa records
in the same zone. the targets listed in a/aaaa record rdata are treated
as the notify slaves.
- Note: this is the simplest way to get the address of slaves,
+ Note: this is the simplest way to get the address of slaves,
but not correct, it can't handle the delegation slaves, or the CNAME
and DNAME logic.
TODO. the function should be provided by one library.'''
@@ -241,8 +247,8 @@ class NotifyOut:
soa_rrset = sqlite3_ds.get_zone_rrset(zone_name, zone_name, 'SOA', self._db_file)
ns_rr_name = []
for ns in ns_rrset:
- ns_rr_name.append(self._get_rdata_data(ns))
-
+ ns_rr_name.append(self._get_rdata_data(ns))
+
if len(soa_rrset) > 0:
sname = (soa_rrset[0][sqlite3_ds.RR_RDATA_INDEX].split(' '))[0].strip() #TODO, bad hardcode to get rdata part
if sname in ns_rr_name:
@@ -291,7 +297,7 @@ class NotifyOut:
else:
min_timeout = tmp_timeout
- block_timeout = _IDLE_SLEEP_TIME
+ block_timeout = None
if min_timeout is not None:
block_timeout = min_timeout - time.time()
if block_timeout < 0:
@@ -314,6 +320,14 @@ class NotifyOut:
# This is None only during some tests
if self._read_sock is not None:
valid_socks.append(self._read_sock)
+
+ # There are currently no notifying zones; wait for a zone to send notify
+ if block_timeout is None:
+ self._nonblock_event.clear()
+ self._nonblock_event.wait()
+ # a new notifying zone has been added; check it immediately
+ block_timeout = 0
+
try:
r_fds, w, e = select.select(valid_socks, [], [], block_timeout)
except select.error as err:
@@ -340,10 +354,10 @@ class NotifyOut:
return replied_zones, not_replied_zones
def _zone_notify_handler(self, zone_notify_info, event_type):
- '''Notify handler for one zone. The first notify message is
- always triggered by the event "_EVENT_TIMEOUT" since when
- one zone prepares to notify its slaves, its notify_timeout
- is set to now, which is used to trigger sending notify
+ '''Notify handler for one zone. The first notify message is
+ always triggered by the event "_EVENT_TIMEOUT" since when
+ one zone prepares to notify its slaves, its notify_timeout
+ is set to now, which is used to trigger sending notify
message when dispatcher() scanning zones. '''
tgt = zone_notify_info.get_current_notify_target()
if event_type == _EVENT_READ:
@@ -362,13 +376,13 @@ class NotifyOut:
self._log_msg('info', 'notify to %s: retried exceeded' % addr_to_str(tgt))
self._notify_next_target(zone_notify_info)
else:
- retry_timeout = _NOTIFY_TIMEOUT * pow(2, zone_notify_info.notify_try_num)
# set exponential backoff according rfc1996 section 3.6
+ retry_timeout = _NOTIFY_TIMEOUT * pow(2, zone_notify_info.notify_try_num)
zone_notify_info.notify_timeout = time.time() + retry_timeout
self._send_notify_message_udp(zone_notify_info, tgt)
def _notify_next_target(self, zone_notify_info):
- '''Notify next address for the same zone. If all the targets
+ '''Notify next address for the same zone. If all the targets
has been notified, notify the first zone in waiting list. '''
zone_notify_info.notify_try_num = 0
zone_notify_info.set_next_notify_target()
@@ -376,21 +390,23 @@ class NotifyOut:
if not tgt:
zone_notify_info.finish_notify_out()
with self._lock:
- self.notify_num -= 1
- self._notifying_zones.remove((zone_notify_info.zone_name,
- zone_notify_info.zone_class))
+ self.notify_num -= 1
+ self._notifying_zones.remove((zone_notify_info.zone_name,
+ zone_notify_info.zone_class))
# trigger notify out for waiting zones
if len(self._waiting_zones) > 0:
- zone_id = self._waiting_zones.pop(0)
+ zone_id = self._waiting_zones.pop(0)
self._notify_infos[zone_id].prepare_notify_out()
- self.notify_num += 1
+ self.notify_num += 1
self._notifying_zones.append(zone_id)
+ if not self._nonblock_event.isSet():
+ self._nonblock_event.set()
def _send_notify_message_udp(self, zone_notify_info, addrinfo):
- msg, qid = self._create_notify_message(zone_notify_info.zone_name,
+ msg, qid = self._create_notify_message(zone_notify_info.zone_name,
zone_notify_info.zone_class)
render = MessageRenderer()
- render.set_length_limit(512)
+ render.set_length_limit(512)
msg.to_wire(render)
zone_notify_info.notify_msg_id = qid
try:
@@ -405,7 +421,7 @@ class NotifyOut:
return True
def _create_rrset_from_db_record(self, record, zone_class):
- '''Create one rrset from one record of datasource, if the schema of record is changed,
+ '''Create one rrset from one record of datasource, if the schema of record is changed,
This function should be updated first. TODO, the function is copied from xfrout, there
should be library for creating one rrset. '''
rrtype_ = RRType(record[sqlite3_ds.RR_TYPE_INDEX])
@@ -425,7 +441,7 @@ class NotifyOut:
question = Question(Name(zone_name), RRClass(zone_class), RRType('SOA'))
msg.add_question(question)
# Add soa record to answer section
- soa_record = sqlite3_ds.get_zone_rrset(zone_name, zone_name, 'SOA', self._db_file)
+ soa_record = sqlite3_ds.get_zone_rrset(zone_name, zone_name, 'SOA', self._db_file)
rrset_soa = self._create_rrset_from_db_record(soa_record[0], zone_class)
msg.add_rrset(Message.SECTION_ANSWER, rrset_soa)
return msg, qid
@@ -443,10 +459,10 @@ class NotifyOut:
self._log_msg('error', errstr + 'bad flags')
return _BAD_QR
- if msg.get_qid() != zone_notify_info.notify_msg_id:
+ if msg.get_qid() != zone_notify_info.notify_msg_id:
self._log_msg('error', errstr + 'bad query ID')
return _BAD_QUERY_ID
-
+
question = msg.get_question()[0]
if question.get_name() != Name(zone_notify_info.zone_name):
self._log_msg('error', errstr + 'bad query name')
@@ -456,7 +472,7 @@ class NotifyOut:
self._log_msg('error', errstr + 'bad opcode')
return _BAD_OPCODE
except Exception as err:
- # We don't care what exception, just report it?
+ # We don't care what exception, just report it?
self._log_msg('error', errstr + str(err))
return _BAD_REPLY_PACKET
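
The main behavioural change in this file is the removal of the _IDLE_SLEEP_TIME polling: when there is nothing to notify, _prepare_select_info() now returns a timeout of None and the dispatcher blocks on _nonblock_event until send_notify() or shutdown() sets it. The sketch below is a generic C++ illustration of that wait-for-work pattern (condition variable standing in for threading.Event); it is not project code and all names are made up.

    #include <condition_variable>
    #include <iostream>
    #include <mutex>
    #include <thread>

    int
    main() {
        std::mutex mtx;
        std::condition_variable cv;
        bool work_available = false;

        // Stand-in for the dispatcher: block until work arrives, no busy loop.
        std::thread dispatcher([&]() {
            std::unique_lock<std::mutex> lock(mtx);
            cv.wait(lock, [&]() { return work_available; });
            std::cout << "woken up, processing work" << std::endl;
        });

        // Stand-in for send_notify()/shutdown() setting the event.
        {
            std::lock_guard<std::mutex> lock(mtx);
            work_available = true;
        }
        cv.notify_one();
        dispatcher.join();
        return (0);
    }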
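
A smaller detail worth noting from the _zone_notify_handler() hunk: on a timeout the retry interval is _NOTIFY_TIMEOUT (one second) multiplied by two to the power of the attempt count, as suggested by RFC 1996 section 3.6, and the zone moves on to the next target once the retry count is exceeded. A standalone sketch of the resulting schedule, with names mirroring the Python constants purely for illustration:

    #include <iostream>

    // retry_timeout = _NOTIFY_TIMEOUT * 2^notify_try_num
    int
    main() {
        const double NOTIFY_TIMEOUT = 1.0;    // seconds, mirrors _NOTIFY_TIMEOUT
        for (int notify_try_num = 0; notify_try_num < 5; ++notify_try_num) {
            const double retry_timeout = NOTIFY_TIMEOUT * (1 << notify_try_num);
            std::cout << "attempt " << notify_try_num << ": next retry in "
                      << retry_timeout << "s" << std::endl;
        }
        return (0);    // prints intervals of 1, 2, 4, 8 and 16 seconds
    }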
diff --git a/src/lib/python/isc/notify/tests/notify_out_test.py b/src/lib/python/isc/notify/tests/notify_out_test.py
index 305e38b..0eb77a3 100644
--- a/src/lib/python/isc/notify/tests/notify_out_test.py
+++ b/src/lib/python/isc/notify/tests/notify_out_test.py
@@ -117,7 +117,9 @@ class TestNotifyOut(unittest.TestCase):
def test_send_notify(self):
notify_out._MAX_NOTIFY_NUM = 2
+ self._notify._nonblock_event.clear()
self._notify.send_notify('example.net')
+ self.assertTrue(self._notify._nonblock_event.isSet())
self.assertEqual(self._notify.notify_num, 1)
self.assertEqual(self._notify._notifying_zones[0], ('example.net.', 'IN'))
@@ -126,7 +128,10 @@ class TestNotifyOut(unittest.TestCase):
self.assertEqual(self._notify._notifying_zones[1], ('example.com.', 'IN'))
# notify_num is equal to MAX_NOTIFY_NUM, append it to waiting_zones list.
+ self._notify._nonblock_event.clear()
self._notify.send_notify('example.com', 'CH')
+ # adding a zone to the waiting list won't set nonblock_event.
+ self.assertFalse(self._notify._nonblock_event.isSet())
self.assertEqual(self._notify.notify_num, 2)
self.assertEqual(1, len(self._notify._waiting_zones))
@@ -348,7 +353,7 @@ class TestNotifyOut(unittest.TestCase):
def test_prepare_select_info(self):
timeout, valid_fds, notifying_zones = self._notify._prepare_select_info()
- self.assertEqual(notify_out._IDLE_SLEEP_TIME, timeout)
+ self.assertEqual(None, timeout)
self.assertListEqual([], valid_fds)
self._notify._notify_infos[('example.net.', 'IN')]._sock = 1
@@ -372,7 +377,32 @@ class TestNotifyOut(unittest.TestCase):
def test_shutdown(self):
thread = self._notify.dispatcher()
self.assertTrue(thread.is_alive())
+ # nonblock_event won't be set since there are no notifying zones.
+ self.assertFalse(self._notify._nonblock_event.isSet())
+
+ # set nonblock_event manually
+ self._notify._nonblock_event.set()
+ # nonblock_event will be cleared soon since there are no notifying zones.
+ while (self._notify._nonblock_event.isSet()):
+ pass
+
+ # send notify
+ example_net_info = self._notify._notify_infos[('example.net.', 'IN')]
+ example_net_info.notify_slaves = [('127.0.0.1', 53)]
+ example_net_info.create_socket('127.0.0.1')
+ self._notify.send_notify('example.net')
+ self.assertTrue(self._notify._nonblock_event.isSet())
+ # Set notify_try_num to _MAX_NOTIFY_TRY_NUM; zone 'example.net' will soon be
+ # removed from the notifying zones and nonblock_event will be cleared, since
+ # no notifying zone is left.
+ example_net_info.notify_try_num = notify_out._MAX_NOTIFY_TRY_NUM
+ while (self._notify._nonblock_event.isSet()):
+ pass
+
+ self.assertFalse(self._notify._nonblock_event.isSet())
self._notify.shutdown()
+ # nonblock_event should have been set to stop the wait.
+ self.assertTrue(self._notify._nonblock_event.isSet())
self.assertFalse(thread.is_alive())
if __name__== "__main__":
diff --git a/src/lib/server_common/Makefile.am b/src/lib/server_common/Makefile.am
index a3063ba..d576104 100644
--- a/src/lib/server_common/Makefile.am
+++ b/src/lib/server_common/Makefile.am
@@ -17,13 +17,15 @@ AM_CXXFLAGS += -Wno-unused-parameter
endif
lib_LTLIBRARIES = libserver_common.la
-libserver_common_la_SOURCES = portconfig.h portconfig.cc
+libserver_common_la_SOURCES = client.h client.cc
libserver_common_la_SOURCES += keyring.h keyring.cc
+libserver_common_la_SOURCES += portconfig.h portconfig.cc
libserver_common_la_LIBADD = $(top_builddir)/src/lib/exceptions/libexceptions.la
libserver_common_la_LIBADD += $(top_builddir)/src/lib/asiolink/libasiolink.la
libserver_common_la_LIBADD += $(top_builddir)/src/lib/cc/libcc.la
libserver_common_la_LIBADD += $(top_builddir)/src/lib/config/libcfgclient.la
libserver_common_la_LIBADD += $(top_builddir)/src/lib/log/liblog.la
+libserver_common_la_LIBADD += $(top_builddir)/src/lib/acl/libacl.la
libserver_common_la_LIBADD += $(top_builddir)/src/lib/dns/libdns++.la
CLEANFILES = *.gcno *.gcda
diff --git a/src/lib/server_common/client.cc b/src/lib/server_common/client.cc
new file mode 100644
index 0000000..31dee88
--- /dev/null
+++ b/src/lib/server_common/client.cc
@@ -0,0 +1,75 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <string>
+#include <sstream>
+
+#include <acl/ip_check.h>
+
+#include <asiolink/io_endpoint.h>
+#include <asiolink/io_message.h>
+
+#include <server_common/client.h>
+
+using namespace isc::acl;
+using namespace isc::server_common;
+using namespace isc::asiolink;
+
+struct Client::ClientImpl {
+ ClientImpl(const IOMessage& request_message) :
+ request_(request_message),
+ request_src_(request_.getRemoteEndpoint().getSockAddr())
+ {}
+
+ const IOMessage& request_;
+ const IPAddress request_src_;
+};
+
+Client::Client(const IOMessage& request_message) :
+ impl_(new ClientImpl(request_message))
+{}
+
+Client::~Client() {
+ delete impl_;
+}
+
+const IOEndpoint&
+Client::getRequestSourceEndpoint() const {
+ return (impl_->request_.getRemoteEndpoint());
+}
+
+const IPAddress&
+Client::getRequestSourceIPAddress() const {
+ return (impl_->request_src_);
+}
+
+std::string
+Client::toText() const {
+ std::stringstream ss;
+ ss << impl_->request_.getRemoteEndpoint().getAddress().toText()
+ << '#' << impl_->request_.getRemoteEndpoint().getPort();
+ return (ss.str());
+}
+
+std::ostream&
+isc::server_common::operator<<(std::ostream& os, const Client& client) {
+ return (os << client.toText());
+}
+
+template <>
+bool
+IPCheck<Client>::matches(const Client& client) const {
+ const IPAddress& request_src(client.getRequestSourceIPAddress());
+ return (compare(request_src.getData(), request_src.getFamily()));
+}
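
(For context: the template specialization above is what allows a generic
isc::acl::IPCheck to be evaluated directly against a Client. The following
minimal sketch shows how it could be exercised; the dummy-socket and
fixed-endpoint setup is borrowed from the unit tests added later in this
commit, and the exampleMatch() function is an illustration only, not part
of client.cc.)

#include <sys/socket.h>

#include <boost/scoped_ptr.hpp>

#include <acl/ip_check.h>

#include <asiolink/io_address.h>
#include <asiolink/io_socket.h>
#include <asiolink/io_message.h>

#include <server_common/client.h>

using boost::scoped_ptr;
using isc::acl::IPCheck;
using isc::asiolink::IOAddress;
using isc::asiolink::IOEndpoint;
using isc::asiolink::IOMessage;
using isc::asiolink::IOSocket;
using isc::server_common::Client;

bool
exampleMatch() {
    // A request that appears to come from 192.0.2.1, UDP port 53214.
    scoped_ptr<const IOEndpoint> endpoint(
        IOEndpoint::create(IPPROTO_UDP, IOAddress("192.0.2.1"), 53214));
    const IOMessage request(NULL, 0, IOSocket::getDummyUDPSocket(), *endpoint);
    const Client client(request);

    // The IPCheck<Client>::matches() specialization compares the check's
    // address prefix against the request's source address.
    return (IPCheck<Client>("192.0.2.0/24").matches(client));   // true here
}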
diff --git a/src/lib/server_common/client.h b/src/lib/server_common/client.h
new file mode 100644
index 0000000..148e069
--- /dev/null
+++ b/src/lib/server_common/client.h
@@ -0,0 +1,165 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __CLIENT_H
+#define __CLIENT_H 1
+
+#include <string>
+#include <ostream>
+
+#include <boost/noncopyable.hpp>
+
+#include <acl/ip_check.h>
+
+namespace isc {
+namespace asiolink {
+class IOMessage;
+class IOEndpoint;
+}
+
+namespace acl {
+struct IPAddress;
+}
+
+namespace server_common {
+
+/// A DNS client with a single request context.
+///
+/// The \c Client class represents a DNS client with information about one
+/// DNS request (e.g., a query). The information includes the source and
+/// destination IP addresses of the request and information about the DNS
+/// request message such as the query name or (if provided) TSIG key
+/// information.
+///
+/// A \c Client class object is expected to be constructed on receiving a
+/// new request with lower level information such as IP addresses and is
+/// updated with DNS specific information as the server processes the request.
+/// It is also expected to be used as the primary interface for request
+/// processing such as query handling or access control.
+///
+/// Furthermore, to minimize the overhead, this class may be further
+/// extended so that it can be reused, with an additional method to reset
+/// the internal information.
+///
+/// In the current initial implementation, however, it only contains the
+/// lower level information in the form of an \c IOMessage object and cannot
+/// be reused (it must be constructed for every new request). Also, the
+/// only actual usage of this class at this moment is for ACL handling.
+///
+/// A \c Client class object is generally assumed to be valid throughout
+/// the processing of a single request, and is then destructed or (when
+/// supported) reset. To prevent it from being copied and held accidentally
+/// beyond the expected valid period, it is intentionally made non-copyable.
+///
+/// Notes about other possibilities: we may want to abstract it further,
+/// so that it can also be used for DHCP. In that case, we'd subclass a
+/// base client class for DNS specific clients and DHCP specific clients.
+/// We might also want to separate DNS clients for authoritative servers
+/// and clients for the resolver, especially because the former could be
+/// simpler with performance optimizations.
+class Client : boost::noncopyable {
+public:
+ ///
+ /// \name Constructors and Destructor
+ ///
+ //@{
+ /// The constructor.
+ ///
+ /// This initial version of the constructor takes an \c IOMessage object
+ /// that is supposed to represent a DNS request message sent from an
+ /// external client (the constructor does not verify that assumption
+ /// about the given \c IOMessage).
+ ///
+ /// If and when we extend the behavior and responsibility
+ /// of this class, this version of the constructor will probably be
+ /// deprecated.
+ ///
+ /// \c request_message must be valid throughout the lifetime of the client.
+ ///
+ /// \exception None
+ /// \param request_message Refers to \c IOMessage corresponding to some
+ /// DNS request message.
+ explicit Client(const isc::asiolink::IOMessage& request_message);
+
+ /// The destructor
+ ~Client();
+ //@}
+
+ /// Return the client's endpoint of the request.
+ ///
+ /// This should be identical to the result of \c getRemoteEndpoint()
+ /// called on \c request_message passed to the constructor.
+ ///
+ /// \exception None
+ const isc::asiolink::IOEndpoint& getRequestSourceEndpoint() const;
+
+ /// Return the IP address part of the client request's endpoint.
+ ///
+ /// The resulting \c IPAddress could also be constructed from the result
+ /// of \c getRequestSourceEndpoint(), and in that sense this method is
+ /// redundant. But this class builds the \c IPAddress once, when the
+ /// \c Client is constructed, and always returns a reference to it, so it
+ /// should be more efficient. It is provided so that it can be called
+ /// multiple times in a complicated ACL with minimum cost.
+ ///
+ /// \exception None
+ const isc::acl::IPAddress& getRequestSourceIPAddress() const;
+
+ /// Convert the Client to a string.
+ ///
+ /// (In the initial implementation) the format of the resulting string
+ /// is as follows:
+ /// \code <IP address>#<port>
+ /// \endcode
+ /// The IP address is the textual representation of the client's IP
+ /// address, which is the source address of the request the client has
+ /// sent. The port is the UDP or TCP port of the client's end of the request.
+ ///
+ /// \exception std::bad_alloc Internal resource allocation fails
+ std::string toText() const;
+
+private:
+ struct ClientImpl;
+ ClientImpl* impl_;
+};
+
+/// \brief Insert the \c Client as a string into stream.
+///
+/// This function converts \c client into a string and inserts it into the
+/// output stream \c os.
+///
+/// \param os A \c std::ostream object on which the insertion operation is
+/// performed.
+/// \param client A reference to the \c Client object to be output.
+/// \return A reference to the same \c std::ostream object referenced by
+/// parameter \c os after the insertion operation.
+std::ostream& operator<<(std::ostream& os, const Client& client);
+}
+
+namespace acl {
+/// The specialization of \c IPCheck for access control with \c Client.
+///
+/// It returns \c true if the source IP address of the client's request
+/// matches the expression encapsulated in the \c IPCheck, and returns
+/// \c false if not.
+template <>
+bool IPCheck<server_common::Client>::matches(
+ const server_common::Client& client) const;
+}
+}
+
+#endif // __CLIENT_H
+
+// Local Variables:
+// mode: c++
+// End:
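
(The documentation above positions Client as the per-request hook for access
control. The rough sketch below illustrates the intended call pattern; the
handleRequest() function, the hard-coded prefix and the way the IOMessage
reaches the handler are assumptions for illustration, not part of this
commit.)

#include <iostream>

#include <acl/ip_check.h>

#include <asiolink/io_message.h>

#include <server_common/client.h>

using isc::acl::IPCheck;
using isc::asiolink::IOMessage;
using isc::server_common::Client;

// Hypothetical per-request handler: reject requests whose source address
// falls outside an allowed prefix, otherwise continue processing.
bool
handleRequest(const IOMessage& request_message) {
    // One Client per request; it must not outlive request_message.
    const Client client(request_message);

    // A real server would evaluate a configured ACL; a single hard-coded
    // IPCheck keeps the sketch short.
    const IPCheck<Client> allowed("192.0.2.0/24");
    if (!allowed.matches(client)) {
        // operator<< uses Client::toText(), e.g. "192.0.2.1#53214".
        std::cerr << "request from " << client << " rejected" << std::endl;
        return (false);
    }

    // ... normal request processing would continue here ...
    return (true);
}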
diff --git a/src/lib/server_common/tests/Makefile.am b/src/lib/server_common/tests/Makefile.am
index ecdb2d9..3c061c2 100644
--- a/src/lib/server_common/tests/Makefile.am
+++ b/src/lib/server_common/tests/Makefile.am
@@ -26,6 +26,7 @@ TESTS =
if HAVE_GTEST
TESTS += run_unittests
run_unittests_SOURCES = run_unittests.cc
+run_unittests_SOURCES += client_unittest.cc
run_unittests_SOURCES += portconfig_unittest.cc
run_unittests_SOURCES += keyring_test.cc
nodist_run_unittests_SOURCES = data_path.h
@@ -37,6 +38,8 @@ run_unittests_LDADD += $(top_builddir)/src/lib/server_common/libserver_common.la
run_unittests_LDADD += $(top_builddir)/src/lib/asiolink/libasiolink.la
run_unittests_LDADD += $(top_builddir)/src/lib/asiodns/libasiodns.la
run_unittests_LDADD += $(top_builddir)/src/lib/cc/libcc.la
+run_unittests_LDADD += $(top_builddir)/src/lib/acl/libacl.la
+run_unittests_LDADD += $(top_builddir)/src/lib/util/libutil.la
run_unittests_LDADD += $(top_builddir)/src/lib/dns/libdns++.la
run_unittests_LDADD += $(top_builddir)/src/lib/util/unittests/libutil_unittests.la
run_unittests_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
diff --git a/src/lib/server_common/tests/client_unittest.cc b/src/lib/server_common/tests/client_unittest.cc
new file mode 100644
index 0000000..34a90a2
--- /dev/null
+++ b/src/lib/server_common/tests/client_unittest.cc
@@ -0,0 +1,127 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <sys/socket.h>
+#include <string.h>
+
+#include <string>
+#include <sstream>
+
+#include <boost/scoped_ptr.hpp>
+
+#include <acl/ip_check.h>
+
+#include <asiolink/io_address.h>
+#include <asiolink/io_socket.h>
+#include <asiolink/io_message.h>
+
+#include <server_common/client.h>
+
+#include <gtest/gtest.h>
+
+using namespace boost;
+using namespace isc::acl;
+using namespace isc::asiolink;
+using namespace isc::server_common;
+
+namespace {
+
+class ClientTest : public ::testing::Test {
+protected:
+ ClientTest() {
+ endpoint4.reset(IOEndpoint::create(IPPROTO_UDP, IOAddress("192.0.2.1"),
+ 53214));
+ endpoint6.reset(IOEndpoint::create(IPPROTO_TCP,
+ IOAddress("2001:db8::1"), 53216));
+ request4.reset(new IOMessage(NULL, 0, IOSocket::getDummyUDPSocket(),
+ *endpoint4));
+ request6.reset(new IOMessage(NULL, 0, IOSocket::getDummyTCPSocket(),
+ *endpoint6));
+ client4.reset(new Client(*request4));
+ client6.reset(new Client(*request6));
+ }
+ scoped_ptr<const IOEndpoint> endpoint4;
+ scoped_ptr<const IOEndpoint> endpoint6;
+ scoped_ptr<const IOMessage> request4;
+ scoped_ptr<const IOMessage> request6;
+ scoped_ptr<const Client> client4;
+ scoped_ptr<const Client> client6;
+};
+
+TEST_F(ClientTest, constructIPv4) {
+ EXPECT_EQ(AF_INET, client4->getRequestSourceEndpoint().getFamily());
+ EXPECT_EQ(IPPROTO_UDP, client4->getRequestSourceEndpoint().getProtocol());
+ EXPECT_EQ("192.0.2.1",
+ client4->getRequestSourceEndpoint().getAddress().toText());
+ EXPECT_EQ(53214, client4->getRequestSourceEndpoint().getPort());
+
+ const uint8_t expected_data[] = { 192, 0, 2, 1 };
+ EXPECT_EQ(AF_INET, client4->getRequestSourceIPAddress().getFamily());
+ ASSERT_EQ(4, client4->getRequestSourceIPAddress().getLength());
+ EXPECT_EQ(0, memcmp(expected_data,
+ client4->getRequestSourceIPAddress().getData(), 4));
+}
+
+TEST_F(ClientTest, constructIPv6) {
+ EXPECT_EQ(AF_INET6, client6->getRequestSourceEndpoint().getFamily());
+ EXPECT_EQ(IPPROTO_TCP, client6->getRequestSourceEndpoint().getProtocol());
+ EXPECT_EQ("2001:db8::1",
+ client6->getRequestSourceEndpoint().getAddress().toText());
+ EXPECT_EQ(53216, client6->getRequestSourceEndpoint().getPort());
+
+ const uint8_t expected_data[] = { 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x01 };
+ EXPECT_EQ(AF_INET6, client6->getRequestSourceIPAddress().getFamily());
+ ASSERT_EQ(16, client6->getRequestSourceIPAddress().getLength());
+ EXPECT_EQ(0, memcmp(expected_data,
+ client6->getRequestSourceIPAddress().getData(), 16));
+}
+
+TEST_F(ClientTest, ACLCheckIPv4) {
+ // Exact match
+ EXPECT_TRUE(IPCheck<Client>("192.0.2.1").matches(*client4));
+ // Exact match (negative)
+ EXPECT_FALSE(IPCheck<Client>("192.0.2.53").matches(*client4));
+ // Prefix match
+ EXPECT_TRUE(IPCheck<Client>("192.0.2.0/24").matches(*client4));
+ // Prefix match (negative)
+ EXPECT_FALSE(IPCheck<Client>("192.0.1.0/24").matches(*client4));
+ // Address family mismatch (the first 4 bytes of the IPv6 address have the
+ // same binary representation as the client's IPv4 address, which
+ // shouldn't confuse the match logic)
+ EXPECT_FALSE(IPCheck<Client>("c000:0201::").matches(*client4));
+}
+
+TEST_F(ClientTest, ACLCheckIPv6) {
+ // The following are a set of tests of the same concept as ACLCheckIPv4
+ EXPECT_TRUE(IPCheck<Client>("2001:db8::1").matches(*client6));
+ EXPECT_FALSE(IPCheck<Client>("2001:db8::53").matches(*client6));
+ EXPECT_TRUE(IPCheck<Client>("2001:db8::/64").matches(*client6));
+ EXPECT_FALSE(IPCheck<Client>("2001:db8:1::/64").matches(*client6));
+ EXPECT_FALSE(IPCheck<Client>("32.1.13.184").matches(*client6));
+}
+
+TEST_F(ClientTest, toText) {
+ EXPECT_EQ("192.0.2.1#53214", client4->toText());
+ EXPECT_EQ("2001:db8::1#53216", client6->toText());
+}
+
+// test operator<<. We simply confirm it appends the result of toText().
+TEST_F(ClientTest, LeftShiftOperator) {
+ std::ostringstream oss;
+ oss << *client4 << "more text";
+ EXPECT_EQ(client4->toText() + std::string("more text"), oss.str());
+}
+}
diff --git a/tools/system_messages.py b/tools/system_messages.py
new file mode 100644
index 0000000..6cf3ce9
--- /dev/null
+++ b/tools/system_messages.py
@@ -0,0 +1,413 @@
+# Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+# Produce System Messages Manual
+#
+# This tool reads all the .mes files in the directory tree whose root is given
+# on the command line and interprets them as BIND 10 message files. It pulls
+# all the messages and description out, sorts them by message ID, and writes
+# them out as a single (formatted) file.
+#
+# Invocation:
+# The code is invoked using the command line:
+#
+# python system_messages.py [-o <output-file>] <top-source-directory>
+#
+# If no output file is specified, output is written to stdout.
+
+import re
+import os
+import sys
+from optparse import OptionParser
+
+# Main dictionary holding all the messages. The messages are accumulated here
+# before being printed in alphabetical order.
+dictionary = {}
+
+# The structure of the output page is:
+#
+# header
+# message
+# separator
+# message
+# separator
+# :
+# separator
+# message
+# trailer
+#
+# (Indentation is not relevant - it has only been added to the above
+# illustration to make the structure clearer.) The text of these sections is:
+
+# Header - this is output before anything else.
+SEC_HEADER="""<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.2//EN"
+"http://www.oasis-open.org/docbook/xml/4.2/docbookx.dtd" [
+<!ENTITY mdash "&#8212;" >
+<!ENTITY % version SYSTEM "version.ent">
+%version;
+]>
+<book>
+ <?xml-stylesheet href="bind10-guide.css" type="text/css"?>
+
+ <bookinfo>
+ <title>BIND 10 Messages Manual</title>
+
+ <copyright>
+ <year>2011</year><holder>Internet Systems Consortium, Inc.</holder>
+ </copyright>
+
+ <abstract>
+ <para>BIND 10 is a Domain Name System (DNS) suite managed by
+ Internet Systems Consortium (ISC). It includes DNS libraries
+ and modular components for controlling authoritative and
+ recursive DNS servers.
+ </para>
+ <para>
+ This is the messages manual for BIND 10 version &__VERSION__;.
+ The most up-to-date version of this document, along with
+ other documents for BIND 10, can be found at
+ <ulink url="http://bind10.isc.org/docs"/>.
+ </para>
+ </abstract>
+
+ <releaseinfo>This is the messages manual for BIND 10 version
+ &__VERSION__;.</releaseinfo>
+ </bookinfo>
+
+ <chapter id="intro">
+ <title>Introduction</title>
+ <para>
+ This document lists each message that can be logged by the
+ programs in the BIND 10 package. Each entry in this manual
+ is of the form:
+ <screen>IDENTIFICATION message-text</screen>
+ ... where "IDENTIFICATION" is the message identification included
+ in each message logged and "message-text" is the accompanying
+ message text. The "message-text" may include placeholders of the
+ form "%1", "%2" etc.; these parameters are replaced by relevant
+ values when the message is logged.
+ </para>
+ <para>
+ Each entry is also accompanied by a description giving more
+ information about the circumstances that result in the message
+ being logged.
+ </para>
+ <para>
+ For information on configuring and using BIND 10 logging,
+ refer to the <ulink url="bind10-guide.html">BIND 10 Guide</ulink>.
+ </para>
+ </chapter>
+
+ <chapter id="messages">
+ <title>BIND 10 Messages</title>
+ <para>
+ <variablelist>
+"""
+
+# This is output once for each message. The string contains substitution
+# tokens: $I is replaced by the message identification, $T by the message text,
+# and $D by the message description.
+SEC_MESSAGE = """<varlistentry id="$I">
+<term>$I $T</term>
+<listitem><para>
+$D
+</para></listitem>
+</varlistentry>"""
+
+# A description may contain blank lines intended to separate paragraphs. If so,
+# each blank line is replaced by the following.
+SEC_BLANK = "</para><para>"
+
+# The separator is copied to the output verbatim after each message except
+# the last.
+SEC_SEPARATOR = ""
+
+# The trailer is copied to the output verbatim after the last message.
+SEC_TRAILER = """ </variablelist>
+ </para>
+ </chapter>
+</book>"""
+
+
+def reportError(filename, what):
+ """Report an error and exit"""
+ print("*** ERROR in ", filename, file=sys.stderr)
+ print("*** REASON: ", what, file=sys.stderr)
+ print("*** System message generator terminating", file=sys.stderr)
+ sys.exit(1)
+
+
+
+def replaceTag(string):
+ """Replaces the '<' and '>' in text about to be inserted into the template
+ sections above with '&lt;' and '&gt;' to avoid problems with message text
+ being interpreted as XML text.
+ """
+ string1 = string.replace("<", "&lt;")
+ string2 = string1.replace(">", "&gt;")
+ return string2
+
+
+
+def replaceBlankLines(lines):
+ """Replaces blank lines in an array with the contents of the 'blank'
+ section.
+ """
+ result = []
+ for l in lines:
+ if len(l) == 0:
+ result.append(SEC_BLANK)
+ else:
+ result.append(l)
+
+ return result
+
+
+
+# Printing functions
+def printHeader():
+ print(SEC_HEADER)
+
+def printSeparator():
+ print(SEC_SEPARATOR)
+
+def printMessage(msgid):
+ # In the message ID, replace "<" and ">" with XML-safe versions and
+ # substitute into the data.
+ m1 = SEC_MESSAGE.replace("$I", replaceTag(msgid))
+
+ # Do the same for the message text.
+ m2 = m1.replace("$T", replaceTag(dictionary[msgid]['text']))
+
+ # Do the same for the description, then replace blank lines with the
+ # paragraph-break text (SEC_BLANK). (We do this in that order to avoid
+ # replacing the "<" and ">" in the XML tags of SEC_BLANK.)
+ desc1 = [replaceTag(l) for l in dictionary[msgid]['description']]
+ desc2 = replaceBlankLines(desc1)
+
+ # Join the lines together to form a single string and insert into
+ # current text.
+ m3 = m2.replace("$D", "\n".join(desc2))
+
+ print(m3)
+
+def printTrailer():
+ print(SEC_TRAILER)
+
+
+
+def removeEmptyLeadingTrailing(lines):
+ """Removes leading and trailing empty lines.
+
+ A list of strings is passed as argument, some of which may be empty.
+ This function removes from the start and end of the list a contiguous
+ sequence of empty lines and returns the result. Embedded sequences of
+ empty lines are not touched.
+
+ Parameters:
+ lines List of strings to be modified.
+
+ Return:
+ Input list of strings with leading/trailing blank line sequences
+ removed.
+ """
+
+ retlines = []
+
+ # Dispose of degenerate case of empty array
+ if len(lines) == 0:
+ return retlines
+
+ # Search for first non-blank line
+ start = 0
+ while start < len(lines):
+ if len(lines[start]) > 0:
+ break
+ start = start + 1
+
+ # Handle case when entire list is empty
+ if start >= len(lines):
+ return retlines
+
+ # Search for last non-blank line
+ finish = len(lines) - 1
+ while finish >= 0:
+ if len(lines[finish]) > 0:
+ break
+ finish = finish - 1
+
+ retlines = lines[start:finish + 1]
+ return retlines
+
+
+
+def addToDictionary(msgid, msgtext, desc, filename):
+ """Add the current message ID and associated information to the global
+ dictionary. If a message with that ID already exists, loop appending
+ suffixes of the form "(n)" to it until one is found that doesn't.
+
+ Parameters:
+ msgid Message ID
+ msgtext Message text
+ desc Message description
+ filename File from which the message came. Currently this is
+ not used, but a future enhancement may wish to include the
+ name of the message file in the messages manual.
+ """
+
+ # If the ID is in the dictionary, append a "(n)" to the name - this will
+ # flag that there are multiple instances. (However, this is an error -
+ # each ID should be unique in BIND-10.)
+ if msgid in dictionary:
+ i = 1
+ while msgid + " (" + str(i) + ")" in dictionary:
+ i = i + 1
+ msgid = msgid + " (" + str(i) + ")"
+
+ # Remove leading and trailing blank lines in the description, then
+ # add everything into a subdictionary which is then added to the main
+ # one.
+ details = {}
+ details['text'] = msgtext
+ details['description'] = removeEmptyLeadingTrailing(desc)
+ details['filename'] = filename
+ dictionary[msgid] = details
+
+
+
+def processFileContent(filename, lines):
+ """Processes file content. Messages and descriptions are identified and
+ added to a dictionary (keyed by message ID). If the key already exists,
+ a numeric suffix is added to it.
+
+ Parameters:
+ filename Name of the message file being processed
+ lines Lines read from the file
+ """
+
+ prefix = "" # Last prefix encountered
+ msgid = "" # Last message ID encountered
+ msgtext = "" # Text of the message
+ description = [] # Description
+
+ for l in lines:
+ if l.startswith("$"):
+ # Starts with "$". Ignore anything other than $PREFIX
+ words = re.split(r"\s+", l)
+ if words[0].upper() == "$PREFIX":
+ if len(words) == 1:
+ prefix = ""
+ else:
+ prefix = words[1]
+
+ elif l.startswith("%"):
+ # Start of a message. Add the message we were processing to the
+ # dictionary and clear everything apart from the file name.
+ if msgid != "":
+ addToDictionary(msgid, msgtext, description, filename)
+
+ msgid = ""
+ msgtext = ""
+ description = []
+
+ # Start of a message
+ l = l[1:].strip() # Remove "%" and trim leading spaces
+ if len(l) == 0:
+ reportError(filename, "Line with single % found")
+ continue
+
+ # Split into words. The first word is the message ID
+ words = re.split(r"\s+", l)
+ msgid = (prefix + words[0]).upper()
+ msgtext = l[len(words[0]):].strip()
+
+ else:
+ # Part of a description, so add to the current description array
+ description.append(l)
+
+ # All done, add the last message to the global dictionary.
+ if msgid != "":
+ addToDictionary(msgid, msgtext, description, filename)
+
+
+
+def processFile(filename):
+ """Processes a file by reading it in and stripping out all comments and
+ directives. Leading and trailing blank lines in the file are removed
+ and the remainder passed for message processing.
+
+ Parameters:
+ filename Name of the message file to process
+ """
+ lines = open(filename).readlines()
+
+ # Trim leading and trailing spaces from each line, and remove comments.
+ lines = [l.strip() for l in lines]
+ lines = [l for l in lines if not l.startswith("#")]
+
+ # Remove leading/trailing empty line sequences from the result
+ lines = removeEmptyLeadingTrailing(lines)
+
+ # Interpret content
+ processFileContent(filename, lines)
+
+
+
+def processAllFiles(root):
+ """Iterates through all files in the tree starting at the given root and
+ calls processFile for all .mes files found.
+
+ Parameters:
+ root Directory that is the root of the BIND-10 source tree
+ """
+ for (path, dirs, files) in os.walk(root):
+
+ # Identify message files
+ mes_files = [f for f in files if f.endswith(".mes")]
+
+ # ... and process each file in the list
+ for m in mes_files:
+ processFile(path + os.sep + m)
+
+
+# Main program
+if __name__ == "__main__":
+ parser = OptionParser(usage="Usage: %prog [--help | options] root")
+ parser.add_option("-o", "--output", dest="output", default=None,
+ metavar="FILE",
+ help="output file name (default to stdout)")
+ (options, args) = parser.parse_args()
+
+ if len(args) == 0:
+ parser.error("Must supply directory at which to begin search")
+ elif len(args) > 1:
+ parser.error("Only a single root directory can be given")
+
+ # Redirect output if specified (errors are written to stderr)
+ if options.output is not None:
+ sys.stdout = open(options.output, 'w')
+
+ # Read the files and load the data
+ processAllFiles(args[0])
+
+ # Now just print out everything we've read (in alphabetical order).
+ count = 1
+ printHeader()
+ for msgid in sorted(dictionary):
+ if count > 1:
+ printSeparator()
+ count = count + 1
+ printMessage(msgid)
+ printTrailer()