BIND 10 trac519, updated. 67d8e93028e014f644868fede3570abb28e5fb43 Merge branch 'master' into trac519
BIND 10 source code commits
bind10-changes at lists.isc.org
Tue Aug 23 12:08:55 UTC 2011
The branch, trac519 has been updated
via 67d8e93028e014f644868fede3570abb28e5fb43 (commit)
via 73df015104eb5ac8934ff1176c24079e6e9b09c3 (commit)
via 586d49827ebaa2cf2c70dc030c5830afb1fb89f5 (commit)
via 2b755575c9d0277980008df99f92c38dd6b3a420 (commit)
via 58d7fda0fd2efc2d4bccfdcb55ce6ba42af83aa0 (commit)
via 7f08fc3123ef7d26a2e61dd29455c07510404a7e (commit)
via 9d48d1964569b49be17afc3e20085a23544a32de (commit)
via 5c6391cca55baec236b813b4c2e2b7699595559d (commit)
via 6318db7dc90cb6656cc2a1f8e875f2258f6a4343 (commit)
via 35a0136d56de7faca280666ba40bb1b87a85fff6 (commit)
via b97162729a3ad4214e5f6b85452a27904b8f34ca (commit)
via 9d3e78f0d8075ad62391ed005e1e82f79f05e2ca (commit)
via 9622aed753d953a763a9c0ac25cd7868d257bad7 (commit)
via 01d8d0f13289ecdf9996d6d5d26ac0d43e30549c (commit)
via 7fe505d131d2a13a6a412789474d92493ade65dd (commit)
via 954143a2748110c720d28df49159ed4f0bc1a1a2 (commit)
via 8cfa0f76baf92f82bf2865b3557c0a2094e81cb4 (commit)
via bdebd1afa4bf82120c66d9ee8d8cab500ab0b606 (commit)
via 451086b203ef3e4611487630225a7650ad9322e7 (commit)
via c0c7b21ab57bb9445329fed9e1451c534aab6a67 (commit)
via 59add6ec0f7e96ee81a7b9970228b8f795b01997 (commit)
via 1b421982a6fcadebc72d3d6ee7a4e34eec61a25d (commit)
via 45630ca90e823247c429f82b338244a9bba9baf4 (commit)
via 36c6035855db0ae87a64a0d169e0230d936e3e64 (commit)
via d88becea33630677dbb5123cd72fa8695512311a (commit)
via 171088e69ff96a2e242cfdf98e8d1f0415d4c172 (commit)
via 9a8667331d9a7179331516e7bb1f3aa942bf8218 (commit)
via fb7c63f65c121b372b1ea23a823cb17afdcd1dfd (commit)
via 58d6de47f6e189ff0b648b4f2f74e6d5df85d749 (commit)
via 2ecb4add323e3c4ba56641d28e35dd79013ff9cf (commit)
via 540c6a5f5b25d935a8193fd835c1ba83dba02fd5 (commit)
via 507b231626a2e0289288f48b1e4613b569cdd8b2 (commit)
via ea8bdd6cb6894855f109b8d19ce104ae9a4b9cb5 (commit)
via 4a7d63179ae732ede6bdc77c393a1cfd9b0b58ca (commit)
via ba9f03b99b6e1dd46d9b11eb1bac629789c8f94a (commit)
via 6ea996c67dff319e332b465ed450ee50b97de4f7 (commit)
via bbc661e3c38f02b4a1fb50bd4e058a22150b0087 (commit)
via 373a792a4706be2619dd1d1820f949858620bc77 (commit)
via f9b1950752ff1d3041d776a5d50ec2d0ddb8065a (commit)
via fe8f3314300936f71cc89535ecd3f0f3cad3804c (commit)
via 2bb551be853647c25005d1ab167e17ada7a5bfc5 (commit)
via 7dd0238dbd4ed086ca7217ec50d8f0a5be3179f3 (commit)
via 7a9a19d6431df02d48a7bc9de44f08d9450d3a37 (commit)
via d72e84456e23ac19c2c12a186ba429cd2e4985cd (commit)
via deefb84c32a289f8deda6550518a48b01a6032c0 (commit)
via 83f8d6de769a33f51b83cd81efe178db162e95e1 (commit)
via db9e3c398b854c83a65eb227ab9ff40dfae1145b (commit)
via 77030a4789285a3f08fbdd9621a384a9e008f4a8 (commit)
via a030033e5a53dd18157509c6c101340688d16011 (commit)
via 485e0ba7f7fe11e4d28e3eec2be835157521a6e9 (commit)
via 6a55aa002c8f3b701dbb8291cd9a8e21534c6974 (commit)
via 7cdda20613f7ed7b18e7fe210ae0f6a87054dbf3 (commit)
via 745ebcec892cb27feec663de9218ae3647c7b8a5 (commit)
via 1e702fae4c9adbd7134a739dee28c868a15f0b3e (commit)
via 44bd4bc6dc7df56905071933a542e00e91f84837 (commit)
via 006d0fab3f44ec9caa2b23da3866bbbd841cd5d3 (commit)
via 68da925f226966a2760a193e9f9a3cdbdfcfacec (commit)
via 09e8c50958a1fca313c2be427c2991c39798f90f (commit)
via d1b580f1780e5ebdbbf6fe8655cc923fbd5c02de (commit)
via 98e74ad62b23ce33f66e3841431511136bc1c2f8 (commit)
via 0fe4f0151ae7a994aaf305e7985d4ba9f992e482 (commit)
via 9df1f04f8b1f7091ab32dcd56fb6e47e3e96d5a7 (commit)
via 691c232b2655673ac352beafc0bfba4bc966f8f8 (commit)
via 6ad78d124740f1ea18f6f93721ec6f152364e878 (commit)
via 5253640054d48f7816aa00c803f5bc593c0c12c1 (commit)
via ce052cd92cd128ea3db5a8f154bd151956c2920c (commit)
via 6dfeded7b6f2f78a2d45fa54543a5962bdc6c035 (commit)
via 810c79d6d9b8efbc12ec8e1ad727cf002f2dedc6 (commit)
via c74d3b7f393f3934bae22fc9d3a4a49e2211aadb (commit)
via ed6fc7857e3fe7d64f19a0bed27226964009f095 (commit)
via e074df43e95dc002374de30503ba44e203b04788 (commit)
via b06a3e2ba1febb9e34458c5106f8d1629a191d5f (commit)
via 56af86bdab9c9700a13cc7d622653d34cbaa72f3 (commit)
via 4cbf309be8a302afe3bc041da11c24b593464157 (commit)
via b3bcd825cfb9c19a62a7db4d12717e85aca0b1e8 (commit)
via 3f5a0900a568436b011fc14b628b71bb130ae5f7 (commit)
via 6df7102965c6afdec6f621175f9e91a56ee42a67 (commit)
via 81613a741bcc9cbe909c814fab9ca99c1a1fc2fd (commit)
via cc004ec0ff327ca300cde89ffc252a9b1c588bec (commit)
via c454dfae8988337bd10bfe0551ee62a267049dfe (commit)
via afde75c1fe9ab3fa35acdf1a3b5f80ec389e1190 (commit)
via 5de7909a21a077238567b64e489ed5345824b2a0 (commit)
via b4a1bc9ba28398dbd5fdbe4ee4f118a2faf59efa (commit)
via 3ce7b09732207eac03998fa5e267672760e475c9 (commit)
via d9f4f26b0f2c73eddd07b2a4368ae1b238944b80 (commit)
via 59c8ea50e972e7753c96f6bcf46fec48e694daa2 (commit)
via 0f7dd030eb47912112b8774424a62c5561af16a1 (commit)
via fb441884baa9994093ed380aded84e707c3d34b5 (commit)
via 6f5ca0bd47ff6a9b1670f38d6a68a1a7b1a01a5c (commit)
via ee552335b8177318be98e6a4c5d941aa41091a2f (commit)
via edbcbf0ab15f140b96efab5fae808b35e705cf67 (commit)
via c4131b7a0c4a6d666a35847f8cce3d099b7a9949 (commit)
via f3e53fe5cba59946ddcf24be423eece1ab596769 (commit)
via a51d6b87331f0fc991b9926a9101e081668ebbcb (commit)
via 253d1fc351fffc8a0b1d325044854a2defdd7223 (commit)
via d7834356a301b162fb9757427359d0dbac95cecf (commit)
via 004afad6ea3fba7c8dd7730428b50fd770daec66 (commit)
via f20be125d667bceea0d940fc5fabf87b2eef86cd (commit)
via fcc707041d663b98c1992cdd1402cc183155d3c0 (commit)
via da5d5926cb26ca8dbdae119c03687cd3415f6638 (commit)
via 0314c7bb66b85775dea73c95463eed88e9e286c3 (commit)
via b8cecbbd905c10d28bcb905def7160d9e406dac4 (commit)
via 7a31e95e63013a298b449573cc5336bcd64a0419 (commit)
via e18a678b62d03729f065c40650d7183e2f260b22 (commit)
via 1d1a87939a010bd16ed23cd817261e9a655bf98f (commit)
via c6948a6df9aeedd3753bc4c5e3a553088cd98f63 (commit)
via db0371fc9e5c7a85ab524ab7bc0b8169b9ba0486 (commit)
via e906efc3747f052128eef50bed0107a0d53546c8 (commit)
via d86a9dceaddf5a2cee44170e6e677f492df5e0ea (commit)
via 4c2732cbf0bb7384ed61ab3604855f143a0c6c5d (commit)
via aaffb9c83c0fe59d9c7d590c5bea559ed8876269 (commit)
via e8a22472e58bfc7df4a661d665152fe4d70454a6 (commit)
via 2c22d334a05ec1e77299a6c55252f1d1c33082af (commit)
via 8a24b9066537caf373d0cfc11dca855eb6c3e4d9 (commit)
via 7275c59de54593d3baca81345226dda2d3a19c30 (commit)
via bcf37a11b08922d69d02fa2ea1b280b2fa2c21e0 (commit)
via a142fa6302e1e0ea2ad1c9faf59d6a70a53a6489 (commit)
via ae8748f77a0261623216b1a11f9d979f555fe892 (commit)
via d0d5a67123b8009e89e84515eee4f93b37ec8497 (commit)
via a9a976d2a5871f1501018d697d3afd299ceec5da (commit)
via df9a8f921f0d20bd70c519218335357297bffa7d (commit)
via e95625332a20fb50afe43da2db0cab507efe8ebe (commit)
via 28cad73dff9dae43a38ad7dafbee406c690fb77c (commit)
via 4de3a5bdf367d87247cb9138f8929ab4798f014e (commit)
via aa108cc824539a1d32a4aa2f46f9e58171074a9e (commit)
via 691328d91b4c4d15ace467ca47a3c987a9fb52b9 (commit)
via c06463cf96ea7401325a208af8ba457e661d1cec (commit)
via c074f6e0b72c3facf6b325b17dea1ca13a2788cc (commit)
via daa1d6dd07292142d3dec5928583b0ab1da89adf (commit)
via e7b4337aeaa760947e8e7906e64077ad7aaadc66 (commit)
via 0b235902f38d611606d44661506f32baf266fdda (commit)
via c19a295eb4125b4d2a391de65972271002412258 (commit)
via 9261da8717a433cf20218af08d3642fbeffb7d4b (commit)
via d4078d52343247b07c47370b497927a3a47a4f9a (commit)
via 1aa728ddf691657611680385c920e3a7bd5fee12 (commit)
via 1768e822df82943f075ebed023b72d225b3b0216 (commit)
via 326885a3f98c49a848a67dc48db693b8bcc7b508 (commit)
via 3e0a0e157bc2a1ca7ad9efb566755ec61eedd180 (commit)
via 93a7f7d1495795b731242e270b6dc76b1ad6b0dc (commit)
via 87e410c0061df72fe69fb47c7456ae54c609b219 (commit)
via 1ddc6158f7544c95742757654863379fff847771 (commit)
via 0f787178301c7cbf59fc7c516ebe920a33e22429 (commit)
via 9b6993b6f6507fab1bc8956f727cca60c8c9243a (commit)
via 7bda7762ab9243404bbd0964908b3365cd052969 (commit)
via 7cf7ec751e4f776dbb60cd290cea4fb217173cdb (commit)
via d5ded106a85afaf695e59941bd382bca4811fe46 (commit)
via c4ef641d07c7ddfd6b86d6b5ae944ab9a30d6990 (commit)
via e443a325b31edefe9cd4da71e10497db6544468c (commit)
via cddcafd790288f5e666198effa142132b6fc43fa (commit)
via ab5085e81007711f9d18ed77f3d78f51cf37545c (commit)
via 5e621bce015d2847104303fba574989fdf0399e0 (commit)
via 7d5c3d56743fb696405f509663b3e1558fa72e25 (commit)
via 990247bfd2248be5ae4293928101eec87e1997e9 (commit)
via e9e36557849ba6b650e503841596bd31034c1936 (commit)
via b9f87e9332895be6915e2f2960a2e921375e8e7f (commit)
via 978ae99ac4aa211ba4ba960f56bb6cdd84b648ae (commit)
via 2e60562cfda15fad37550ce5996e942084131d1c (commit)
via 2f49e3eb0ddf31d601184b516b7f44ab4ea6eece (commit)
via 17a87c6bb9d16e992fadd47b11b3eb26af54ac69 (commit)
via 2cc500af0929c1f268aeb6f8480bc428af70f4c4 (commit)
via e021b84f7fc20b3e3927093ed87e9c873d33a443 (commit)
via c46b0bc28c22f2ae4b46c592f450e745774846d4 (commit)
via 7740b9810bc093a9083e8c3404afc627c8b78242 (commit)
via 62432e71ef943744fd4ca9ce216da1b0a7250573 (commit)
via 005c77dfe53b54cef92ce51d91f615eb9c2769c4 (commit)
via ce3bc8504d765ecc9b453398efb18662bd4f277a (commit)
via 94fc6d8d303053c47064c9408947cd49a8e11975 (commit)
via ba292defc14029971d5e9043881ddb98c994cfdb (commit)
via c5cf3cc081042fec0e2baea7cdf7f22a8a84664a (commit)
via 779e145d8f15ad9975f6ca689e6a595ea0a3de4b (commit)
via adcbbb141bdb09a6fd999f3369e15c2881f843ba (commit)
via 80014655d76e758868e8e1ed36472be9a606eb2a (commit)
via 959dc163810ac286e01d0163624f5bbad5b82c55 (commit)
via 1d74428fb7a817790c397338db92d102e2113e1c (commit)
via d5e24e94bbd581098e460fc3a0b437478340c876 (commit)
via 4cd96de7e7d4ac12c38b45efe7b3ee0ed331d3b9 (commit)
via 914fe9bc05003defeff70acb84a52e86fb9ced4c (commit)
via b22882ae78f0e5d38d4b6ace0725bf0ae5bc4803 (commit)
via c6ca831b3f171da96fad75c21dffbd2bed71e297 (commit)
via 8ce8e05a403440e7f2323e9d43dca08be1cf8a94 (commit)
via 414b25d4bfa89e0609cd3c8c3a6e610681f4c929 (commit)
via f57e8133a7af31a59578ac2cd50dd20418cb8fbc (commit)
via 85a14b1daffb3a20e9e510b73d25c71ba95cc350 (commit)
via 774a56a8beeef3a73258910b12cace20443a1bcb (commit)
via 89bd1bf64a6d745f4276fce3ee8fa4e050736ff1 (commit)
via f429202995ebb0dbc86d41c6d707815186832063 (commit)
via f14bc0502c3c4d2ffd609b110771ca1fa752b68e (commit)
via f75d5bd488669426794d086b80568ef0a7a4afe6 (commit)
via d719b47c4131e2120305cee60395c0a88f5aca25 (commit)
via c7db1351d3b1c25bfc31ed9e7b6b491e6bcb1555 (commit)
via ac15a86eb62832cc22533bc33b802ea297666ad5 (commit)
via 0af72968bfd192fa418551ae75def455adcfbb4b (commit)
via 977f822d94c59bfd9d56373404291fc85218b1d6 (commit)
via d00042b03e1f85cd1d8ea8340d5ac72222e5123e (commit)
via 0081ce40b832f4c5abaeb0316736d772aec3f08d (commit)
via f03688da19c21b4d46761cc4ed9da981cebe43c1 (commit)
via eb8ba927115b091bb407cbc29ad2d07dfed318f1 (commit)
via b19a36e30d0d3829c68f2e0300ea1487da242af8 (commit)
via 12b3473393fb7a471fc7d928476b0ba66da145e9 (commit)
via cfd1d9e142fa2fd8b21f74de0e4a0109e0a04439 (commit)
via 67b352b3f7cf736c9aa7c1332aa7814911556ad5 (commit)
via 822a00aee0d7feb845e28dad7dccb552d10d83db (commit)
via c293f639684d2c6625b7395c995aa813eafa5fa4 (commit)
via 00686a614cca93f007335d01c06d78cfd212d973 (commit)
via 5951ef6faaffcff62d9a9963260a932666e3decb (commit)
via f82dc7b09f470f79ed2bf099216fa64c76528d3b (commit)
via 5b7dee0548f068e626c0bf5d116fc506d2af92a0 (commit)
via 7990857c32cbb49f4bedf805f86c1b718b3a70d0 (commit)
via 5d6fde4aa0d2a93945276dd722be48e05da72faf (commit)
via f6a1807c25d85a0ca762bfa276ebac4a3430e7c7 (commit)
via 65e4595c21bf9c01fb0b7da61577ae8a79d29c30 (commit)
via 19c8c07e6e1601180f85f7aad145f00112f3f8a4 (commit)
via 87090907f39983b744749017cdac3fb957d8d0c0 (commit)
via 2808941eebec54dc7c4981f5a2a0e149d452b8ca (commit)
via 9351dbcc88ccdd6aa83d72f432f19a76c031124b (commit)
via de06b256b36f6428c5d914266c4e91c25c69ded5 (commit)
via d4867b8dd18ddbee0b30040f569eeac99964343f (commit)
via b5347a6b22c2d82ffa57c8302c81ee0f25b413a1 (commit)
via 848cfc635084c5baccb275ed4995032d3ada2d59 (commit)
via 46b961d69aff3a2e4d1cb7f3d0910bfcc66d1e19 (commit)
via 52357dbe51bd015119a798a4f8e7244a3e1efda4 (commit)
via 97153d16eb9ecb7281ed9dc76783091964e769dd (commit)
via 56083614ae0e8c5177786528e85d348686bf9bc2 (commit)
via c9d7e29600f7a80094bcda2c3bd87d8f07d813e9 (commit)
via 2b6bcb84a17fc98ea0ea87df65e6a77829857ecd (commit)
via cc6d6b14603924a4ef2d86dfaf758447cca6a7ff (commit)
via 69642fb8f55cb4741f977d3fbaacd5d12d742625 (commit)
via 86257c05755c8adbb19ce684546b718dd48a5ef8 (commit)
via 5f13949918d125f851bd2ba8ab092c301835d3ac (commit)
via cce2a00af57ef823abeaeff787eff35f43dfb093 (commit)
via 7e1e150e056d0dcf5a58b2a8036f47c2e5dac820 (commit)
via 15428e5a9c1bb01f5e7a04979c17ec5f1de9d1db (commit)
via ac9fd0a240cbfa8c448cb01bb69ac92313eb7e56 (commit)
via ce0544bd0852415891cb31e0c1b7d0ba0b3d19f3 (commit)
via dba1e2c7884b5bc68f945fd5d2dd500f9a258c6b (commit)
via bc281e8b48c92102d3c64318e07598c8e96e493c (commit)
via 82667b0cdd6592053f5b2f4cfa1cbd0ec92db0b2 (commit)
via 71b0ae9ddbcbf4093900ff879e2e1c82be89867f (commit)
via 1b96c2563342098e05ac4b240c66e60222249cf4 (commit)
via ff14da4f9b706a47f152491eae60586b75430c6e (commit)
via d23cde8c4285cf55b007b300123c41fa852d38d9 (commit)
via 885d7987eefb0b8b694626b0831ed93123fb8d8d (commit)
via 07cd1647921e0e94432cecb2f7a5413cd8f3884e (commit)
via efea6557fd364ee42c84c08df28efa9797f1c9c8 (commit)
via fea1f88cd0bb5bdeefc6048b122da4328635163d (commit)
via 4db53f3593e24b80a33b608432ef463acbec295e (commit)
via 009d45dfbb20a54ea402e7e8f18bc2d253f41ad6 (commit)
via f1d52ff7171da920acc7583fa427a95386312908 (commit)
via f33ffa77fdcc3e40ec42268ea09b67ac65982f1f (commit)
via ac08c1b86b979574678aa110f19fb744719def21 (commit)
via 3702df52de21023d90052afdc54732d9ad285b39 (commit)
via e47f04584b00f6d7b5c8bf9e8ae6af9aaa6831fd (commit)
via 823e0fcf308c7f3fc88ba48070e12bd995e75392 (commit)
via 608d45610e9f499fb43d2e52eba461d489a7d45f (commit)
via e76dc86b0a01a54dab56cbf8552bd0c5fbb5b461 (commit)
via f17363ea38564867df555b6be9138d2eff28daa0 (commit)
via 16e52275c4c9e355cf4e448a5b17136f24324d7a (commit)
via 61029d971895738ba353841d99f4ca07ecf792b7 (commit)
via 1c8043e5b50bd47d7734397a08d5015e3672b9ad (commit)
via 9819295a58b8b40ca6d95c84f1f1de08fb0eb707 (commit)
via dc3b856b460ff380feb68cdff551f334e6db5a27 (commit)
via be9d5fe994e6a086a951e432d56e7de2af3cfd09 (commit)
via 11b8b873e7fd6722053aa224d20f29350bf2b298 (commit)
via b63b9aac20259f3612e23c7a3e977dcb48693ef1 (commit)
via 14a0766224d50d1c4c409e883cf29515dafc25f0 (commit)
via b5fbd9c942b1080aa60a48ee23da60574d1fc22f (commit)
via 63f4617b5ab99d75e98e40760ff68bb1615a84e6 (commit)
via 579fd2bf848e994ed6dcd8d1c3633f2fa62cbd28 (commit)
via e89a3a1302cd3e95403c5c64edb126153852ff35 (commit)
via 87a4f24037965ae88435ebe3f887750c500cbfde (commit)
via aa9497f4d2346e7a18cd07b9bf31dfb5832031bc (commit)
via 7b0201a4f98ee1b1288ae3b074cd1007707b6b21 (commit)
via ba7bc1e14fcf1a223a9a42ede2e9cd7d290c8b61 (commit)
via c6ef5865b3fd8e5d5fb8c891467b3722fde4d685 (commit)
via e05a3418c9d6b3f70cdb387d1f30d8ba59733f02 (commit)
via 525d9602da83a5d8ddbfc9ebda282209aa743a70 (commit)
via c6dc0f2d6f67d69d32e7f8c3c175d79f4b2ef430 (commit)
via 85b53414c2c8f70e541447ee204e004693289956 (commit)
via 6c3401b4a9fb79bdee7484e1e3c05758d1b0c0ca (commit)
via a5cf5c7b3a6ac9be60a8737f0e36a61897d32acd (commit)
via 734cae300ccd13aacec1f32b283d4d21b5de8fb5 (commit)
via 07708b4325680c4731f0d3dc24bca9da3c962d80 (commit)
via b4007e4b25d21ba3b693674ca19ead7d202b7de0 (commit)
via 688d0a641d4fa7a018fb4f9e131ed1454c68dd15 (commit)
via c136060da6a43da5db7e45b6a32da83f0f7d0820 (commit)
from 5ece3fe5e40efbcf7d727650475c35850624cfaf (commit)
Those revisions listed above that are new to this repository have
not appeared on any other notification email; so we list those
revisions in full, below.
- Log -----------------------------------------------------------------
commit 67d8e93028e014f644868fede3570abb28e5fb43
Merge: 5ece3fe5e40efbcf7d727650475c35850624cfaf 73df015104eb5ac8934ff1176c24079e6e9b09c3
Author: zhanglikun <zhanglikun at cnnic.cn>
Date: Tue Aug 23 20:07:49 2011 +0800
Merge branch 'master' into trac519
-----------------------------------------------------------------------
Summary of changes:
ChangeLog | 31 +-
README | 10 +-
doc/guide/bind10-guide.html | 565 +++-
doc/guide/bind10-guide.xml | 895 ++++-
doc/guide/bind10-messages.html | 2028 +++++++++--
doc/guide/bind10-messages.xml | 3790 +++++++++++++++++---
src/bin/auth/auth.spec.pre.in | 18 +
src/bin/auth/b10-auth.8 | 47 +-
src/bin/auth/b10-auth.xml | 48 +-
src/bin/auth/query.cc | 8 +-
src/bin/auth/query.h | 8 +-
src/bin/auth/tests/query_unittest.cc | 8 +-
src/bin/bind10/bind10.8 | 16 +-
src/bin/bind10/bind10.xml | 28 +-
src/bin/bind10/bob.spec | 11 +
src/bin/bind10/creatorapi.txt | 123 +
src/bin/resolver/b10-resolver.8 | 30 +-
src/bin/resolver/b10-resolver.xml | 32 +-
src/bin/stats/b10-stats.8 | 97 +-
src/bin/stats/b10-stats.xml | 122 +-
src/bin/stats/stats-schema.spec | 3 +-
src/bin/stats/stats.spec | 45 +
src/bin/stats/tests/isc/config/ccsession.py | 89 +
src/bin/xfrin/b10-xfrin.8 | 5 +-
src/bin/xfrin/b10-xfrin.xml | 3 +-
src/bin/xfrout/b10-xfrout.xml | 8 +
src/lib/cache/cache_messages.mes | 4 +-
src/lib/cc/session.cc | 2 +-
src/lib/config/module_spec.cc | 91 +-
src/lib/config/module_spec.h | 23 +-
src/lib/config/tests/ccsession_unittests.cc | 4 +-
src/lib/config/tests/module_spec_unittests.cc | 158 +-
src/lib/config/tests/testdata/Makefile.am | 8 +
src/lib/config/tests/testdata/data33_1.data | 7 +
src/lib/config/tests/testdata/data33_2.data | 7 +
src/lib/config/tests/testdata/spec2.spec | 11 +
src/lib/config/tests/testdata/spec33.spec | 50 +
src/lib/config/tests/testdata/spec34.spec | 14 +
src/lib/config/tests/testdata/spec35.spec | 15 +
src/lib/config/tests/testdata/spec36.spec | 17 +
src/lib/config/tests/testdata/spec37.spec | 7 +
src/lib/config/tests/testdata/spec38.spec | 17 +
src/lib/datasrc/Makefile.am | 4 +-
src/lib/datasrc/client.h | 39 +
src/lib/datasrc/database.cc | 501 +++
src/lib/datasrc/database.h | 430 +++
src/lib/datasrc/datasrc_messages.mes | 68 +-
src/lib/datasrc/iterator.h | 61 +
src/lib/datasrc/memory_datasrc.cc | 140 +-
src/lib/datasrc/memory_datasrc.h | 14 +-
src/lib/datasrc/sqlite3_accessor.cc | 472 +++
src/lib/datasrc/sqlite3_accessor.h | 147 +
src/lib/datasrc/static_datasrc.cc | 1 +
src/lib/datasrc/tests/Makefile.am | 3 +
src/lib/datasrc/tests/cache_unittest.cc | 6 +-
src/lib/datasrc/tests/client_unittest.cc | 47 +
src/lib/datasrc/tests/database_unittest.cc | 1115 ++++++
src/lib/datasrc/tests/memory_datasrc_unittest.cc | 39 +
src/lib/datasrc/tests/sqlite3_accessor_unittest.cc | 332 ++
src/lib/datasrc/tests/static_unittest.cc | 1 +
src/lib/datasrc/zone.h | 6 +-
src/lib/dns/Makefile.am | 8 +
src/lib/dns/rdata/generic/afsdb_18.cc | 170 +
src/lib/dns/rdata/generic/afsdb_18.h | 74 +
src/lib/dns/rdata/generic/minfo_14.cc | 155 +
src/lib/dns/rdata/generic/minfo_14.h | 82 +
src/lib/dns/rdata/generic/naptr_35.cc | 314 ++
src/lib/dns/rdata/generic/naptr_35.h | 63 +
src/lib/dns/rdata/generic/rrsig_46.cc | 5 +
src/lib/dns/rdata/generic/rrsig_46.h | 3 +
src/lib/dns/rdata/in_1/dhcid_49.cc | 145 +
src/lib/dns/rdata/in_1/dhcid_49.h | 58 +
src/lib/dns/rdata/in_1/srv_33.h | 4 +-
src/lib/dns/tests/Makefile.am | 3 +
src/lib/dns/tests/rdata_afsdb_unittest.cc | 210 ++
src/lib/dns/tests/rdata_minfo_unittest.cc | 184 +
src/lib/dns/tests/rdata_naptr_unittest.cc | 178 +
src/lib/dns/tests/rdata_rrsig_unittest.cc | 2 +-
src/lib/dns/tests/testdata/Makefile.am | 20 +
.../dns/tests/testdata/rdata_afsdb_fromWire1.spec | 3 +
.../dns/tests/testdata/rdata_afsdb_fromWire2.spec | 6 +
.../dns/tests/testdata/rdata_afsdb_fromWire3.spec | 4 +
.../dns/tests/testdata/rdata_afsdb_fromWire4.spec | 4 +
.../dns/tests/testdata/rdata_afsdb_fromWire5.spec | 4 +
.../dns/tests/testdata/rdata_afsdb_toWire1.spec | 4 +
.../dns/tests/testdata/rdata_afsdb_toWire2.spec | 8 +
.../dns/tests/testdata/rdata_minfo_fromWire1.spec | 3 +
.../dns/tests/testdata/rdata_minfo_fromWire2.spec | 7 +
.../dns/tests/testdata/rdata_minfo_fromWire3.spec | 6 +
.../dns/tests/testdata/rdata_minfo_fromWire4.spec | 6 +
.../dns/tests/testdata/rdata_minfo_fromWire5.spec | 5 +
.../dns/tests/testdata/rdata_minfo_fromWire6.spec | 5 +
.../dns/tests/testdata/rdata_minfo_toWire1.spec | 5 +
.../dns/tests/testdata/rdata_minfo_toWire2.spec | 6 +
.../testdata/rdata_minfo_toWireUncompressed1.spec | 7 +
.../testdata/rdata_minfo_toWireUncompressed2.spec | 8 +
src/lib/exceptions/exceptions.h | 12 +
src/lib/python/isc/config/ccsession.py | 1 +
src/lib/python/isc/config/cfgmgr.py | 15 +
src/lib/python/isc/config/module_spec.py | 111 +-
src/lib/python/isc/config/tests/cfgmgr_test.py | 22 +
.../python/isc/config/tests/module_spec_test.py | 109 +
src/lib/util/filename.h | 5 +
src/lib/util/python/gen_wiredata.py.in | 43 +
src/lib/util/tests/filename_unittest.cc | 15 +
105 files changed, 12829 insertions(+), 1202 deletions(-)
create mode 100644 src/bin/bind10/creatorapi.txt
create mode 100644 src/lib/config/tests/testdata/data33_1.data
create mode 100644 src/lib/config/tests/testdata/data33_2.data
create mode 100644 src/lib/config/tests/testdata/spec33.spec
create mode 100644 src/lib/config/tests/testdata/spec34.spec
create mode 100644 src/lib/config/tests/testdata/spec35.spec
create mode 100644 src/lib/config/tests/testdata/spec36.spec
create mode 100644 src/lib/config/tests/testdata/spec37.spec
create mode 100644 src/lib/config/tests/testdata/spec38.spec
create mode 100644 src/lib/datasrc/database.cc
create mode 100644 src/lib/datasrc/database.h
create mode 100644 src/lib/datasrc/iterator.h
create mode 100644 src/lib/datasrc/sqlite3_accessor.cc
create mode 100644 src/lib/datasrc/sqlite3_accessor.h
create mode 100644 src/lib/datasrc/tests/client_unittest.cc
create mode 100644 src/lib/datasrc/tests/database_unittest.cc
create mode 100644 src/lib/datasrc/tests/sqlite3_accessor_unittest.cc
create mode 100644 src/lib/dns/rdata/generic/afsdb_18.cc
create mode 100644 src/lib/dns/rdata/generic/afsdb_18.h
create mode 100644 src/lib/dns/rdata/generic/minfo_14.cc
create mode 100644 src/lib/dns/rdata/generic/minfo_14.h
create mode 100644 src/lib/dns/rdata/generic/naptr_35.cc
create mode 100644 src/lib/dns/rdata/generic/naptr_35.h
create mode 100644 src/lib/dns/rdata/in_1/dhcid_49.cc
create mode 100644 src/lib/dns/rdata/in_1/dhcid_49.h
create mode 100644 src/lib/dns/tests/rdata_afsdb_unittest.cc
create mode 100644 src/lib/dns/tests/rdata_minfo_unittest.cc
create mode 100644 src/lib/dns/tests/rdata_naptr_unittest.cc
create mode 100644 src/lib/dns/tests/testdata/rdata_afsdb_fromWire1.spec
create mode 100644 src/lib/dns/tests/testdata/rdata_afsdb_fromWire2.spec
create mode 100644 src/lib/dns/tests/testdata/rdata_afsdb_fromWire3.spec
create mode 100644 src/lib/dns/tests/testdata/rdata_afsdb_fromWire4.spec
create mode 100644 src/lib/dns/tests/testdata/rdata_afsdb_fromWire5.spec
create mode 100644 src/lib/dns/tests/testdata/rdata_afsdb_toWire1.spec
create mode 100644 src/lib/dns/tests/testdata/rdata_afsdb_toWire2.spec
create mode 100644 src/lib/dns/tests/testdata/rdata_minfo_fromWire1.spec
create mode 100644 src/lib/dns/tests/testdata/rdata_minfo_fromWire2.spec
create mode 100644 src/lib/dns/tests/testdata/rdata_minfo_fromWire3.spec
create mode 100644 src/lib/dns/tests/testdata/rdata_minfo_fromWire4.spec
create mode 100644 src/lib/dns/tests/testdata/rdata_minfo_fromWire5.spec
create mode 100644 src/lib/dns/tests/testdata/rdata_minfo_fromWire6.spec
create mode 100644 src/lib/dns/tests/testdata/rdata_minfo_toWire1.spec
create mode 100644 src/lib/dns/tests/testdata/rdata_minfo_toWire2.spec
create mode 100644 src/lib/dns/tests/testdata/rdata_minfo_toWireUncompressed1.spec
create mode 100644 src/lib/dns/tests/testdata/rdata_minfo_toWireUncompressed2.spec
-----------------------------------------------------------------------
diff --git a/ChangeLog b/ChangeLog
index 5a14558..43c9154 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,5 +1,32 @@
+282. [func] ocean
+ libdns++: Implement the NAPTR rrtype according to RFC2915,
+ RFC2168 and RFC3403.
+ (Trac #1130, git 01d8d0f13289ecdf9996d6d5d26ac0d43e30549c)
+
+bind10-devel-20110819 released on August 19, 2011
+
+281. [func] jelte
+ Added a new type for configuration data: "named set". This allows for
+ similar configuration as the current "list" type, but with strings
+ instead of indices as identifiers. The intended use is for instance
+ /foo/zones/example.org/bar instead of /foo/zones[2]/bar. Currently
+ this new type is not in use yet.
+ (Trac #926, git 06aeefc4787c82db7f5443651f099c5af47bd4d6)
+
+280. [func] jerry
+ libdns++: Implement the MINFO rrtype according to RFC1035.
+ (Trac #1113, git 7a9a19d6431df02d48a7bc9de44f08d9450d3a37)
+
+279. [func] jerry
+ libdns++: Implement the AFSDB rrtype according to RFC1183.
+ (Trac #1114, git ce052cd92cd128ea3db5a8f154bd151956c2920c)
+
+278. [doc] jelte
+ Add logging configuration documentation to the guide.
+ (Trac #1011, git 2cc500af0929c1f268aeb6f8480bc428af70f4c4)
+
277. [func] jerry
- Implement the SRV rrtype according to RFC2782.
+ libdns++: Implement the SRV rrtype according to RFC2782.
(Trac #1128, git 5fd94aa027828c50e63ae1073d9d6708e0a9c223)
276. [func] stephen
@@ -25,7 +52,7 @@
returns is str or byte.
(Trac #1021, git 486bf91e0ecc5fbecfe637e1e75ebe373d42509b)
-273. [func] vorner
+273. [func] vorner
It is possible to specify ACL for the xfrout module. It is in the ACL
configuration key and has the usual ACL syntax. It currently supports
only the source address. Default ACL accepts everything.
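
    [Editor's note: for readers skimming the ChangeLog entries above, the newly
    supported record types use ordinary master-file syntax. The sketch below shows
    what their RDATA looks like per the cited RFCs (NAPTR per RFC 3403, MINFO per
    RFC 1035, AFSDB per RFC 1183, SRV per RFC 2782); the owner names and field
    values are illustrative only and are not taken from this commit.]

        ; illustrative records only -- names and values are made up
        sip.example.org.        IN NAPTR 100 10 "u" "E2U+sip" "!^.*$!sip:info@example.org!" .
        example.org.            IN MINFO admin.example.org. errors.example.org.
        example.org.            IN AFSDB 1 afsdb.example.org.
        _ldap._tcp.example.org. IN SRV   0 5 389 ldap.example.org.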
diff --git a/README b/README
index a6509da..4b84a88 100644
--- a/README
+++ b/README
@@ -8,10 +8,10 @@ for serving, maintaining, and developing DNS.
BIND10-devel is new development leading up to the production
BIND 10 release. It contains prototype code and experimental
interfaces. Nevertheless it is ready to use now for testing the
-new BIND 10 infrastructure ideas. The Year 2 milestones of the
-five year plan are described here:
+new BIND 10 infrastructure ideas. The Year 3 goals of the five
+year plan are described here:
- https://bind10.isc.org/wiki/Year2Milestones
+ http://bind10.isc.org/wiki/Year3Goals
This release includes the bind10 master process, b10-msgq message
bus, b10-auth authoritative DNS server (with SQLite3 and in-memory
@@ -67,8 +67,8 @@ e.g.,
Operating-System specific tips:
- FreeBSD
- You may need to install a python binding for sqlite3 by hand. A
- sample procedure is as follows:
+ You may need to install a python binding for sqlite3 by hand.
+ A sample procedure is as follows:
- add the following to /etc/make.conf
PYTHON_VERSION=3.1
- build and install the python binding from ports, assuming the top
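
    [Editor's note: the README hunk above is cut off by the diff context. On a
    typical FreeBSD system the procedure it sketches amounts to the following;
    the ports path databases/py-sqlite3 is an assumption here, not text from the
    README, so check your local ports tree.]

        # in /etc/make.conf
        PYTHON_VERSION=3.1

        # then build and install the binding from ports (path assumed)
        $ cd /usr/ports/databases/py-sqlite3
        $ make install clean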
diff --git a/doc/guide/bind10-guide.html b/doc/guide/bind10-guide.html
index 5754cf0..a9a4cc6 100644
--- a/doc/guide/bind10-guide.html
+++ b/doc/guide/bind10-guide.html
@@ -1,24 +1,24 @@
-<html><head><meta http-equiv="Content-Type" content="text/html; charset=ISO-8859-1"><title>BIND 10 Guide</title><link rel="stylesheet" href="./bind10-guide.css" type="text/css"><meta name="generator" content="DocBook XSL Stylesheets V1.75.2"><meta name="description" content="BIND 10 is a Domain Name System (DNS) suite managed by Internet Systems Consortium (ISC). It includes DNS libraries and modular components for controlling authoritative and recursive DNS servers. This is the reference guide for BIND 10 version 20110519. The most up-to-date version of this document, along with other documents for BIND 10, can be found at ."></head><body bgcolor="white" text="black" link="#0000FF" vlink="#840084" alink="#0000FF"><div class="book" title="BIND 10 Guide"><div class="titlepage"><div><div><h1 class="title"><a name="id1168230298903"></a>BIND 10 Guide</h1></div><div><h2 class="subtitle">Administrator Reference for BIND 10</h2></div><div><p class="releaseinfo">This is the reference guide for BIND 10 version
- 20110519.</p></div><div><p class="copyright">Copyright © 2010 Internet Systems Consortium, Inc.</p></div><div><div class="abstract" title="Abstract"><p class="title"><b>Abstract</b></p><p>BIND 10 is a Domain Name System (DNS) suite managed by
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=ISO-8859-1"><title>BIND 10 Guide</title><link rel="stylesheet" href="./bind10-guide.css" type="text/css"><meta name="generator" content="DocBook XSL Stylesheets V1.75.2"><meta name="description" content="BIND 10 is a Domain Name System (DNS) suite managed by Internet Systems Consortium (ISC). It includes DNS libraries and modular components for controlling authoritative and recursive DNS servers. This is the reference guide for BIND 10 version 20110809. The most up-to-date version of this document, along with other documents for BIND 10, can be found at ."></head><body bgcolor="white" text="black" link="#0000FF" vlink="#840084" alink="#0000FF"><div class="book" title="BIND 10 Guide"><div class="titlepage"><div><div><h1 class="title"><a name="id1168229460045"></a>BIND 10 Guide</h1></div><div><h2 class="subtitle">Administrator Reference for BIND 10</h2></div><div><p class="releaseinfo">This is the reference guide for BIND 10 version
+ 20110809.</p></div><div><p class="copyright">Copyright © 2010-2011 Internet Systems Consortium, Inc.</p></div><div><div class="abstract" title="Abstract"><p class="title"><b>Abstract</b></p><p>BIND 10 is a Domain Name System (DNS) suite managed by
Internet Systems Consortium (ISC). It includes DNS libraries
and modular components for controlling authoritative and
recursive DNS servers.
</p><p>
- This is the reference guide for BIND 10 version 20110519.
+ This is the reference guide for BIND 10 version 20110809.
The most up-to-date version of this document, along with
- other documents for BIND 10, can be found at <a class="ulink" href="http://bind10.isc.org/docs" target="_top">http://bind10.isc.org/docs</a>. </p></div></div></div><hr></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="chapter"><a href="#intro">1. Introduction</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168230299038">Supported Platforms</a></span></dt><dt><span class="section"><a href="#id1168230299065">Required Software</a></span></dt><dt><span class="section"><a href="#starting_stopping">Starting and Stopping the Server</a></span></dt><dt><span class="section"><a href="#managing_once_running">Managing BIND 10</a></span></dt></dl></dd><dt><span class="chapter"><a href="#installation">2. Installation</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168230284846">Building Requirements</a></span></dt><dt><span class="section"><a href="#quickstart">Quick start</a></span></dt><dt><span class="section"><a href="#install">Installation from source</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168230285026">Download Tar File</a></span></dt><dt><span class="section"><a href="#id1168230285045">Retrieve from Git</a></span></dt><dt><span class="section"><a href="#id1168230285106">Configure before the build</a></span></dt><dt><span class="section"><a href="#id1168230285203">Build</a></span></dt><dt><span class="section"><a href="#id1168230285219">Install</a></span></dt><dt><span class="section"><a href="#id1168230285242">Install Hierarchy</a></span></dt></dl></dd></dl></dd><dt><span class="chapter"><a href="#bind10">3. Starting BIND10 with <span class="command"><strong>bind10</strong></span></a></span></dt><dd><dl><dt><span class="section"><a href="#start">Starting BIND 10</a></span></dt></dl></dd><dt><span class="chapter"><a href="#msgq">4. Command channel</a></span></dt><dt><span class="chapter"><a href="#cfgmgr">5. Configuration manager</a></span></dt><dt><span class="chapter"><a href="#cmdctl">6. Remote control daemon</a></span></dt><dd><dl><dt><span class="section"><a href="#cmdctl.spec">Configuration specification for b10-cmdctl</a></span></dt></dl></dd><dt><span class="chapter"><a href="#bindctl">7. Control and configure user interface</a></span></dt><dt><span class="chapter"><a href="#authserver">8. Authoritative Server</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168230285816">Server Configurations</a></span></dt><dt><span class="section"><a href="#id1168230285881">Data Source Backends</a></span></dt><dt><span class="section"><a href="#id1168230285912">Loading Master Zones Files</a></span></dt></dl></dd><dt><span class="chapter"><a href="#xfrin">9. Incoming Zone Transfers</a></span></dt><dt><span class="chapter"><a href="#xfrout">10. Outbound Zone Transfers</a></span></dt><dt><span class="chapter"><a href="#zonemgr">11. Secondary Manager</a></span></dt><dt><span class="chapter"><a href="#resolverserver">12. Recursive Name Server</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168230286300">Forwarding</a></span></dt></dl></dd><dt><span class="chapter"><a href="#statistics">13. Statistics</a></span></dt><dt><span class="chapter"><a href="#logging">14. Logging</a></span></dt></dl></div><div class="chapter" title="Chapter 1. Introduction"><div class="titlepage"><div><div><h2 class="title"><a name="intro"></a>Chapter 1. Introduction</h2></div></div></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="section"><a href="#id1168230299038">Supported Platforms</a></span></dt><dt><span class="section"><a href="#id1168230299065">Required Software</a></span></dt><dt><span class="section"><a href="#starting_stopping">Starting and Stopping the Server</a></span></dt><dt><span class="section"><a href="#managing_once_running">Managing BIND 10</a></span></dt></dl></div><p>
+ other documents for BIND 10, can be found at <a class="ulink" href="http://bind10.isc.org/docs" target="_top">http://bind10.isc.org/docs</a>. </p></div></div></div><hr></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="chapter"><a href="#intro">1. Introduction</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168229460181">Supported Platforms</a></span></dt><dt><span class="section"><a href="#id1168229460208">Required Software</a></span></dt><dt><span class="section"><a href="#starting_stopping">Starting and Stopping the Server</a></span></dt><dt><span class="section"><a href="#managing_once_running">Managing BIND 10</a></span></dt></dl></dd><dt><span class="chapter"><a href="#installation">2. Installation</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168229445988">Building Requirements</a></span></dt><dt><span class="section"><a href="#quickstart">Quick start</a></span></dt><dt><span class="section"><a href="#install">Installation from source</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168229446178">Download Tar File</a></span></dt><dt><span class="section"><a href="#id1168229446197">Retrieve from Git</a></span></dt><dt><span class="section"><a href="#id1168229446258">Configure before the build</a></span></dt><dt><span class="section"><a href="#id1168229446356">Build</a></span></dt><dt><span class="section"><a href="#id1168229446371">Install</a></span></dt><dt><span class="section"><a href="#id1168229446394">Install Hierarchy</a></span></dt></dl></dd></dl></dd><dt><span class="chapter"><a href="#bind10">3. Starting BIND10 with <span class="command"><strong>bind10</strong></span></a></span></dt><dd><dl><dt><span class="section"><a href="#start">Starting BIND 10</a></span></dt></dl></dd><dt><span class="chapter"><a href="#msgq">4. Command channel</a></span></dt><dt><span class="chapter"><a href="#cfgmgr">5. Configuration manager</a></span></dt><dt><span class="chapter"><a href="#cmdctl">6. Remote control daemon</a></span></dt><dd><dl><dt><span class="section"><a href="#cmdctl.spec">Configuration specification for b10-cmdctl</a></span></dt></dl></dd><dt><span class="chapter"><a href="#bindctl">7. Control and configure user interface</a></span></dt><dt><span class="chapter"><a href="#authserver">8. Authoritative Server</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168229446979">Server Configurations</a></span></dt><dt><span class="section"><a href="#id1168229447044">Data Source Backends</a></span></dt><dt><span class="section"><a href="#id1168229447074">Loading Master Zones Files</a></span></dt></dl></dd><dt><span class="chapter"><a href="#xfrin">9. Incoming Zone Transfers</a></span></dt><dt><span class="chapter"><a href="#xfrout">10. Outbound Zone Transfers</a></span></dt><dt><span class="chapter"><a href="#zonemgr">11. Secondary Manager</a></span></dt><dt><span class="chapter"><a href="#resolverserver">12. Recursive Name Server</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168229447556">Access Control</a></span></dt><dt><span class="section"><a href="#id1168229447671">Forwarding</a></span></dt></dl></dd><dt><span class="chapter"><a href="#statistics">13. Statistics</a></span></dt><dt><span class="chapter"><a href="#logging">14. Logging</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168229447788">Logging configuration</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168229447799">Loggers</a></span></dt><dt><span class="section"><a href="#id1168229448040">Output Options</a></span></dt><dt><span class="section"><a href="#id1168229448215">Example session</a></span></dt></dl></dd><dt><span class="section"><a href="#id1168229448428">Logging Message Format</a></span></dt></dl></dd></dl></div><div class="chapter" title="Chapter 1. Introduction"><div class="titlepage"><div><div><h2 class="title"><a name="intro"></a>Chapter 1. Introduction</h2></div></div></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="section"><a href="#id1168229460181">Supported Platforms</a></span></dt><dt><span class="section"><a href="#id1168229460208">Required Software</a></span></dt><dt><span class="section"><a href="#starting_stopping">Starting and Stopping the Server</a></span></dt><dt><span class="section"><a href="#managing_once_running">Managing BIND 10</a></span></dt></dl></div><p>
BIND is the popular implementation of a DNS server, developer
interfaces, and DNS tools.
BIND 10 is a rewrite of BIND 9. BIND 10 is written in C++ and Python
and provides a modular environment for serving and maintaining DNS.
</p><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>
This guide covers the experimental prototype of
- BIND 10 version 20110519.
+ BIND 10 version 20110809.
</p></div><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>
BIND 10 provides a EDNS0- and DNSSEC-capable
authoritative DNS server and a caching recursive name server
which also provides forwarding.
- </p></div><div class="section" title="Supported Platforms"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168230299038"></a>Supported Platforms</h2></div></div></div><p>
+ </p></div><div class="section" title="Supported Platforms"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229460181"></a>Supported Platforms</h2></div></div></div><p>
BIND 10 builds have been tested on Debian GNU/Linux 5,
Ubuntu 9.10, NetBSD 5, Solaris 10, FreeBSD 7 and 8, and CentOS
Linux 5.3.
@@ -28,13 +28,15 @@
It is planned for BIND 10 to build, install and run on
Windows and standard Unix-type platforms.
- </p></div><div class="section" title="Required Software"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168230299065"></a>Required Software</h2></div></div></div><p>
+ </p></div><div class="section" title="Required Software"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229460208"></a>Required Software</h2></div></div></div><p>
BIND 10 requires Python 3.1. Later versions may work, but Python
3.1 is the minimum version which will work.
</p><p>
BIND 10 uses the Botan crypto library for C++. It requires
- at least Botan version 1.8. To build BIND 10, install the
- Botan libraries and development include headers.
+ at least Botan version 1.8.
+ </p><p>
+ BIND 10 uses the log4cplus C++ logging library. It requires
+ at least log4cplus version 1.0.3.
</p><p>
The authoritative server requires SQLite 3.3.9 or newer.
The <span class="command"><strong>b10-xfrin</strong></span>, <span class="command"><strong>b10-xfrout</strong></span>,
@@ -136,7 +138,10 @@
and, of course, DNS. These include detailed developer
documentation and code examples.
- </p></div><div class="chapter" title="Chapter 2. Installation"><div class="titlepage"><div><div><h2 class="title"><a name="installation"></a>Chapter 2. Installation</h2></div></div></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="section"><a href="#id1168230284846">Building Requirements</a></span></dt><dt><span class="section"><a href="#quickstart">Quick start</a></span></dt><dt><span class="section"><a href="#install">Installation from source</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168230285026">Download Tar File</a></span></dt><dt><span class="section"><a href="#id1168230285045">Retrieve from Git</a></span></dt><dt><span class="section"><a href="#id1168230285106">Configure before the build</a></span></dt><dt><span class="section"><a href="#id1168230285203">Build</a></span></dt><dt><span class="section"><a href="#id1168230285219">Install</a></span></dt><dt><span class="section"><a href="#id1168230285242">Install Hierarchy</a></span></dt></dl></dd></dl></div><div class="section" title="Building Requirements"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168230284846"></a>Building Requirements</h2></div></div></div><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>
+ </p></div><div class="chapter" title="Chapter 2. Installation"><div class="titlepage"><div><div><h2 class="title"><a name="installation"></a>Chapter 2. Installation</h2></div></div></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="section"><a href="#id1168229445988">Building Requirements</a></span></dt><dt><span class="section"><a href="#quickstart">Quick start</a></span></dt><dt><span class="section"><a href="#install">Installation from source</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168229446178">Download Tar File</a></span></dt><dt><span class="section"><a href="#id1168229446197">Retrieve from Git</a></span></dt><dt><span class="section"><a href="#id1168229446258">Configure before the build</a></span></dt><dt><span class="section"><a href="#id1168229446356">Build</a></span></dt><dt><span class="section"><a href="#id1168229446371">Install</a></span></dt><dt><span class="section"><a href="#id1168229446394">Install Hierarchy</a></span></dt></dl></dd></dl></div><div class="section" title="Building Requirements"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229445988"></a>Building Requirements</h2></div></div></div><p>
+ In addition to the run-time requirements, building BIND 10
+ from source code requires various development include headers.
+ </p><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>
Some operating systems have split their distribution packages into
a run-time and a development package. You will need to install
the development package versions, which include header files and
@@ -147,6 +152,11 @@
</p><p>
+ To build BIND 10, also install the Botan (at least version
+ 1.8) and the log4cplus (at least version 1.0.3)
+ development include headers.
+ </p><p>
+
The Python Library and Python _sqlite3 module are required to
enable the Xfrout and Xfrin support.
</p><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>
@@ -156,7 +166,7 @@
Building BIND 10 also requires a C++ compiler and
standard development headers, make, and pkg-config.
BIND 10 builds have been tested with GCC g++ 3.4.3, 4.1.2,
- 4.1.3, 4.2.1, 4.3.2, and 4.4.1.
+ 4.1.3, 4.2.1, 4.3.2, and 4.4.1; Clang++ 2.8; and Sun C++ 5.10.
</p></div><div class="section" title="Quick start"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="quickstart"></a>Quick start</h2></div></div></div><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>
This quickly covers the standard steps for installing
and deploying BIND 10 as an authoritative name server using
@@ -192,14 +202,14 @@
the Git code revision control system or as a downloadable
tar file. It may also be available in pre-compiled ready-to-use
packages from operating system vendors.
- </p><div class="section" title="Download Tar File"><div class="titlepage"><div><div><h3 class="title"><a name="id1168230285026"></a>Download Tar File</h3></div></div></div><p>
+ </p><div class="section" title="Download Tar File"><div class="titlepage"><div><div><h3 class="title"><a name="id1168229446178"></a>Download Tar File</h3></div></div></div><p>
Downloading a release tar file is the recommended method to
obtain the source code.
</p><p>
The BIND 10 releases are available as tar file downloads from
<a class="ulink" href="ftp://ftp.isc.org/isc/bind10/" target="_top">ftp://ftp.isc.org/isc/bind10/</a>.
Periodic development snapshots may also be available.
- </p></div><div class="section" title="Retrieve from Git"><div class="titlepage"><div><div><h3 class="title"><a name="id1168230285045"></a>Retrieve from Git</h3></div></div></div><p>
+ </p></div><div class="section" title="Retrieve from Git"><div class="titlepage"><div><div><h3 class="title"><a name="id1168229446197"></a>Retrieve from Git</h3></div></div></div><p>
Downloading this "bleeding edge" code is recommended only for
developers or advanced users. Using development code in a production
environment is not recommended.
@@ -233,7 +243,7 @@
<span class="command"><strong>autoheader</strong></span>,
<span class="command"><strong>automake</strong></span>,
and related commands.
- </p></div><div class="section" title="Configure before the build"><div class="titlepage"><div><div><h3 class="title"><a name="id1168230285106"></a>Configure before the build</h3></div></div></div><p>
+ </p></div><div class="section" title="Configure before the build"><div class="titlepage"><div><div><h3 class="title"><a name="id1168229446258"></a>Configure before the build</h3></div></div></div><p>
BIND 10 uses the GNU Build System to discover build environment
details.
To generate the makefiles using the defaults, simply run:
@@ -242,7 +252,7 @@
Run <span class="command"><strong>./configure</strong></span> with the <code class="option">--help</code>
switch to view the different options. The commonly-used options are:
- </p><div class="variablelist"><dl><dt><span class="term">--prefix</span></dt><dd>Define the the installation location (the
+ </p><div class="variablelist"><dl><dt><span class="term">--prefix</span></dt><dd>Define the installation location (the
default is <code class="filename">/usr/local/</code>).
</dd><dt><span class="term">--with-boost-include</span></dt><dd>Define the path to find the Boost headers.
</dd><dt><span class="term">--with-pythonpath</span></dt><dd>Define the path to Python 3.1 if it is not in the
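
    [Editor's note: putting together the configure options named in the hunk
    above (--prefix, --with-boost-include, --with-pythonpath), a typical run
    might look like the following; the paths are placeholders, not values taken
    from the guide.]

        $ ./configure --prefix=/opt/bind10 \
              --with-boost-include=/usr/local/include \
              --with-pythonpath=/usr/local/bin/python3.1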
@@ -264,16 +274,16 @@
</p><p>
If the configure fails, it may be due to missing or old
dependencies.
- </p></div><div class="section" title="Build"><div class="titlepage"><div><div><h3 class="title"><a name="id1168230285203"></a>Build</h3></div></div></div><p>
+ </p></div><div class="section" title="Build"><div class="titlepage"><div><div><h3 class="title"><a name="id1168229446356"></a>Build</h3></div></div></div><p>
After the configure step is complete, to build the executables
from the C++ code and prepare the Python scripts, run:
</p><pre class="screen">$ <strong class="userinput"><code>make</code></strong></pre><p>
- </p></div><div class="section" title="Install"><div class="titlepage"><div><div><h3 class="title"><a name="id1168230285219"></a>Install</h3></div></div></div><p>
+ </p></div><div class="section" title="Install"><div class="titlepage"><div><div><h3 class="title"><a name="id1168229446371"></a>Install</h3></div></div></div><p>
To install the BIND 10 executables, support files,
and documentation, run:
</p><pre class="screen">$ <strong class="userinput"><code>make install</code></strong></pre><p>
- </p><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>The install step may require superuser privileges.</p></div></div><div class="section" title="Install Hierarchy"><div class="titlepage"><div><div><h3 class="title"><a name="id1168230285242"></a>Install Hierarchy</h3></div></div></div><p>
+ </p><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>The install step may require superuser privileges.</p></div></div><div class="section" title="Install Hierarchy"><div class="titlepage"><div><div><h3 class="title"><a name="id1168229446394"></a>Install Hierarchy</h3></div></div></div><p>
The following is the layout of the complete BIND 10 installation:
</p><div class="itemizedlist"><ul class="itemizedlist" type="disc"><li class="listitem">
<code class="filename">bin/</code> —
@@ -304,14 +314,14 @@
data source and configuration databases.
</li></ul></div><p>
</p></div></div></div><div class="chapter" title="Chapter 3. Starting BIND10 with bind10"><div class="titlepage"><div><div><h2 class="title"><a name="bind10"></a>Chapter 3. Starting BIND10 with <span class="command"><strong>bind10</strong></span></h2></div></div></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="section"><a href="#start">Starting BIND 10</a></span></dt></dl></div><p>
- BIND 10 provides the <span class="command"><strong>bind10</strong></span> command which
+ BIND 10 provides the <span class="command"><strong>bind10</strong></span> command which
starts up the required processes.
<span class="command"><strong>bind10</strong></span>
will also restart processes that exit unexpectedly.
This is the only command needed to start the BIND 10 system.
</p><p>
After starting the <span class="command"><strong>b10-msgq</strong></span> communications channel,
- <span class="command"><strong>bind10</strong></span> connects to it,
+ <span class="command"><strong>bind10</strong></span> connects to it,
runs the configuration manager, and reads its own configuration.
Then it starts the other modules.
</p><p>
@@ -334,7 +344,12 @@
To start the BIND 10 service, simply run <span class="command"><strong>bind10</strong></span>.
Run it with the <code class="option">--verbose</code> switch to
get additional debugging or diagnostic output.
- </p></div></div><div class="chapter" title="Chapter 4. Command channel"><div class="titlepage"><div><div><h2 class="title"><a name="msgq"></a>Chapter 4. Command channel</h2></div></div></div><p>
+ </p><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>
+ If the setproctitle Python module is detected at start up,
+ the process names for the Python-based daemons will be renamed
+ to better identify them instead of just <span class="quote">“<span class="quote">python</span>”</span>.
+ This is not needed on some operating systems.
+ </p></div></div></div><div class="chapter" title="Chapter 4. Command channel"><div class="titlepage"><div><div><h2 class="title"><a name="msgq"></a>Chapter 4. Command channel</h2></div></div></div><p>
The BIND 10 components use the <span class="command"><strong>b10-msgq</strong></span>
message routing daemon to communicate with other BIND 10 components.
The <span class="command"><strong>b10-msgq</strong></span> implements what is called the
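
    [Editor's note: the "Starting BIND 10" text above boils down to a single
    command; the --verbose switch is the only option shown in this excerpt.]

        $ bind10 --verbose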
@@ -490,12 +505,12 @@ shutdown
the details and relays (over a <span class="command"><strong>b10-msgq</strong></span> command
channel) the configuration on to the specified module.
</p><p>
- </p></div><div class="chapter" title="Chapter 8. Authoritative Server"><div class="titlepage"><div><div><h2 class="title"><a name="authserver"></a>Chapter 8. Authoritative Server</h2></div></div></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="section"><a href="#id1168230285816">Server Configurations</a></span></dt><dt><span class="section"><a href="#id1168230285881">Data Source Backends</a></span></dt><dt><span class="section"><a href="#id1168230285912">Loading Master Zones Files</a></span></dt></dl></div><p>
+ </p></div><div class="chapter" title="Chapter 8. Authoritative Server"><div class="titlepage"><div><div><h2 class="title"><a name="authserver"></a>Chapter 8. Authoritative Server</h2></div></div></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="section"><a href="#id1168229446979">Server Configurations</a></span></dt><dt><span class="section"><a href="#id1168229447044">Data Source Backends</a></span></dt><dt><span class="section"><a href="#id1168229447074">Loading Master Zones Files</a></span></dt></dl></div><p>
The <span class="command"><strong>b10-auth</strong></span> is the authoritative DNS server.
It supports EDNS0 and DNSSEC. It supports IPv6.
Normally it is started by the <span class="command"><strong>bind10</strong></span> master
process.
- </p><div class="section" title="Server Configurations"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168230285816"></a>Server Configurations</h2></div></div></div><p>
+ </p><div class="section" title="Server Configurations"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229446979"></a>Server Configurations</h2></div></div></div><p>
<span class="command"><strong>b10-auth</strong></span> is configured via the
<span class="command"><strong>b10-cfgmgr</strong></span> configuration manager.
The module name is <span class="quote">“<span class="quote">Auth</span>”</span>.
@@ -515,7 +530,7 @@ This may be a temporary setting until then.
</p><div class="variablelist"><dl><dt><span class="term">shutdown</span></dt><dd>Stop the authoritative DNS server.
</dd></dl></div><p>
- </p></div><div class="section" title="Data Source Backends"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168230285881"></a>Data Source Backends</h2></div></div></div><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>
+ </p></div><div class="section" title="Data Source Backends"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229447044"></a>Data Source Backends</h2></div></div></div><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>
For the development prototype release, <span class="command"><strong>b10-auth</strong></span>
supports a SQLite3 data source backend and in-memory data source
backend.
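
    [Editor's note: the "shutdown" command listed above is sent to the Auth
    module from bindctl. Assuming the usual module-command form used elsewhere
    in the guide, the invocation would look like this.]

        > Auth shutdown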
@@ -529,7 +544,7 @@ This may be a temporary setting until then.
The default is <code class="filename">/usr/local/var/</code>.)
This data file location may be changed by defining the
<span class="quote">“<span class="quote">database_file</span>”</span> configuration.
- </p></div><div class="section" title="Loading Master Zones Files"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168230285912"></a>Loading Master Zones Files</h2></div></div></div><p>
+ </p></div><div class="section" title="Loading Master Zones Files"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229447074"></a>Loading Master Zones Files</h2></div></div></div><p>
      RFC 1035 style DNS master zone files may be imported
into a BIND 10 data source by using the
<span class="command"><strong>b10-loadzone</strong></span> utility.
@@ -569,7 +584,7 @@ This may be a temporary setting until then.
provide <span class="quote">“<span class="quote">secondary</span>”</span> service.
</p><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>
The current development release of BIND 10 only supports
- AXFR. (IXFR is not supported.)
+ AXFR. (IXFR is not supported.)
@@ -591,7 +606,7 @@ This may be a temporary setting until then.
NOTIFY messages to slaves.
</p><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>
The current development release of BIND 10 only supports
- AXFR. (IXFR is not supported.)
+ AXFR. (IXFR is not supported.)
Access control is not yet provided.
</p></div></div><div class="chapter" title="Chapter 11. Secondary Manager"><div class="titlepage"><div><div><h2 class="title"><a name="zonemgr"></a>Chapter 11. Secondary Manager</h2></div></div></div><p>
The <span class="command"><strong>b10-zonemgr</strong></span> process is started by
@@ -607,13 +622,13 @@ This may be a temporary setting until then.
</p><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>
Access control (such as allowing notifies) is not yet provided.
The primary/secondary service is not yet complete.
- </p></div></div><div class="chapter" title="Chapter 12. Recursive Name Server"><div class="titlepage"><div><div><h2 class="title"><a name="resolverserver"></a>Chapter 12. Recursive Name Server</h2></div></div></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="section"><a href="#id1168230286300">Forwarding</a></span></dt></dl></div><p>
+ </p></div></div><div class="chapter" title="Chapter 12. Recursive Name Server"><div class="titlepage"><div><div><h2 class="title"><a name="resolverserver"></a>Chapter 12. Recursive Name Server</h2></div></div></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="section"><a href="#id1168229447556">Access Control</a></span></dt><dt><span class="section"><a href="#id1168229447671">Forwarding</a></span></dt></dl></div><p>
The <span class="command"><strong>b10-resolver</strong></span> process is started by
<span class="command"><strong>bind10</strong></span>.
</p><p>
The main <span class="command"><strong>bind10</strong></span> process can be configured
- to select to run either the authoritative or resolver.
+          to run either the authoritative server or the resolver, or both.
By default, it starts the authoritative service.
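         A minimal sketch of switching services, assuming the
         <span class="quote">“<span class="quote">Boss</span>”</span> module's
         <code class="option">start_auth</code> and
         <code class="option">start_resolver</code> settings (these item names
         are an assumption; check with
         <strong class="userinput"><code>config show Boss</code></strong>):
         </p><pre class="screen">
> <strong class="userinput"><code>config set Boss/start_auth false</code></strong>
> <strong class="userinput"><code>config set Boss/start_resolver true</code></strong>
> <strong class="userinput"><code>config commit</code></strong>
</pre><p>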
@@ -629,14 +644,52 @@ This may be a temporary setting until then.
The master <span class="command"><strong>bind10</strong></span> will stop and start
the desired services.
</p><p>
- The resolver also needs to be configured to listen on an address
- and port:
+ By default, the resolver listens on port 53 for 127.0.0.1 and ::1.
+ The following example shows how it can be configured to
+ listen on an additional address (and port):
</p><pre class="screen">
-> <strong class="userinput"><code>config set Resolver/listen_on [{ "address": "127.0.0.1", "port": 53 }]</code></strong>
+> <strong class="userinput"><code>config add Resolver/listen_on</code></strong>
+> <strong class="userinput"><code>config set Resolver/listen_on[<em class="replaceable"><code>2</code></em>]/address "192.168.1.1"</code></strong>
+> <strong class="userinput"><code>config set Resolver/listen_on[<em class="replaceable"><code>2</code></em>]/port 53</code></strong>
> <strong class="userinput"><code>config commit</code></strong>
</pre><p>
- </p><div class="section" title="Forwarding"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168230286300"></a>Forwarding</h2></div></div></div><p>
+ </p><p>(Replace the <span class="quote">“<span class="quote"><em class="replaceable"><code>2</code></em></span>”</span>
+ as needed; run <span class="quote">“<span class="quote"><strong class="userinput"><code>config show
+ Resolver/listen_on</code></strong></span>”</span> if needed.)</p><div class="section" title="Access Control"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229447556"></a>Access Control</h2></div></div></div><p>
+ By default, the <span class="command"><strong>b10-resolver</strong></span> daemon only accepts
+ DNS queries from the localhost (127.0.0.1 and ::1).
+ The <code class="option">Resolver/query_acl</code> configuration may
+ be used to reject, drop, or allow specific IPs or networks.
+       This configuration list is first-match: the first entry that
+       matches the query determines the action taken.
+ </p><p>
+ The configuration's <code class="option">action</code> item may be
+ set to <span class="quote">“<span class="quote">ACCEPT</span>”</span> to allow the incoming query,
+ <span class="quote">“<span class="quote">REJECT</span>”</span> to respond with a DNS REFUSED return
+ code, or <span class="quote">“<span class="quote">DROP</span>”</span> to ignore the query without
+ any response (such as a blackhole). For more information,
+ see the respective debugging messages: <a class="ulink" href="bind10-messages.html#RESOLVER_QUERY_ACCEPTED" target="_top">RESOLVER_QUERY_ACCEPTED</a>,
+ <a class="ulink" href="bind10-messages.html#RESOLVER_QUERY_REJECTED" target="_top">RESOLVER_QUERY_REJECTED</a>,
+ and <a class="ulink" href="bind10-messages.html#RESOLVER_QUERY_DROPPED" target="_top">RESOLVER_QUERY_DROPPED</a>.
+ </p><p>
+ The required configuration's <code class="option">from</code> item is set
+       to an IPv4 or IPv6 address, an address with a network mask, or to
+ the special lowercase keywords <span class="quote">“<span class="quote">any6</span>”</span> (for
+ any IPv6 address) or <span class="quote">“<span class="quote">any4</span>”</span> (for any IPv4
+ address).
+ </p><p>
+       For example, to allow the <em class="replaceable"><code>192.168.1.0/24</code></em>
+ network to use your recursive name server, at the
+ <span class="command"><strong>bindctl</strong></span> prompt run:
+ </p><pre class="screen">
+> <strong class="userinput"><code>config add Resolver/query_acl</code></strong>
+> <strong class="userinput"><code>config set Resolver/query_acl[<em class="replaceable"><code>2</code></em>]/action "ACCEPT"</code></strong>
+> <strong class="userinput"><code>config set Resolver/query_acl[<em class="replaceable"><code>2</code></em>]/from "<em class="replaceable"><code>192.168.1.0/24</code></em>"</code></strong>
+> <strong class="userinput"><code>config commit</code></strong>
+</pre><p>(Replace the <span class="quote">“<span class="quote"><em class="replaceable"><code>2</code></em></span>”</span>
+ as needed; run <span class="quote">“<span class="quote"><strong class="userinput"><code>config show
+ Resolver/query_acl</code></strong></span>”</span> if needed.)</p><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>This prototype access control configuration
+ syntax may be changed.</p></div></div><div class="section" title="Forwarding"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229447671"></a>Forwarding</h2></div></div></div><p>
To enable forwarding, the upstream address and port must be
configured to forward queries to, such as:
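         (The following is a sketch mirroring the
         <code class="option">listen_on</code> example above; the address and
         port are placeholders for your upstream resolver.)
         </p><pre class="screen">
> <strong class="userinput"><code>config set Resolver/forward_addresses [{ "address": "192.168.1.1", "port": 53 }]</code></strong>
> <strong class="userinput"><code>config commit</code></strong>
</pre><p>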
@@ -684,48 +737,414 @@ This may be a temporary setting until then.
"stats.timestamp": 1295543046.823504
}
</pre><p>
- </p></div><div class="chapter" title="Chapter 14. Logging"><div class="titlepage"><div><div><h2 class="title"><a name="logging"></a>Chapter 14. Logging</h2></div></div></div><p>
- Each message written by BIND 10 to the configured logging destinations
- comprises a number of components that identify the origin of the
- message and, if the message indicates a problem, information about the
- problem that may be useful in fixing it.
- </p><p>
- Consider the message below logged to a file:
- </p><pre class="screen">2011-06-15 13:48:22.034 ERROR [b10-resolver.asiolink]
+ </p></div><div class="chapter" title="Chapter 14. Logging"><div class="titlepage"><div><div><h2 class="title"><a name="logging"></a>Chapter 14. Logging</h2></div></div></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="section"><a href="#id1168229447788">Logging configuration</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168229447799">Loggers</a></span></dt><dt><span class="section"><a href="#id1168229448040">Output Options</a></span></dt><dt><span class="section"><a href="#id1168229448215">Example session</a></span></dt></dl></dd><dt><span class="section"><a href="#id1168229448428">Logging Message Format</a></span></dt></dl></div><div class="section" title="Logging configuration"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229447788"></a>Logging configuration</h2></div></div></div><p>
+
+ The logging system in BIND 10 is configured through the
+ Logging module. All BIND 10 modules will look at the
+ configuration in Logging to see what should be logged and
+ to where.
+
+
+
+ </p><div class="section" title="Loggers"><div class="titlepage"><div><div><h3 class="title"><a name="id1168229447799"></a>Loggers</h3></div></div></div><p>
+
+ Within BIND 10, a message is logged through a component
+ called a "logger". Different parts of BIND 10 log messages
+ through different loggers, and each logger can be configured
+       independently of the others.
+
+ </p><p>
+
+ In the Logging module, you can specify the configuration
+ for zero or more loggers; any that are not specified will
+       take appropriate default values.
+
+ </p><p>
+
+ The three most important elements of a logger configuration
+ are the <code class="option">name</code> (the component that is
+ generating the messages), the <code class="option">severity</code>
+ (what to log), and the <code class="option">output_options</code>
+ (where to log).
+
+ </p><div class="section" title="name (string)"><div class="titlepage"><div><div><h4 class="title"><a name="id1168229447824"></a>name (string)</h4></div></div></div><p>
+ Each logger in the system has a name, the name being that
+ of the component using it to log messages. For instance,
+ if you want to configure logging for the resolver module,
+ you add an entry for a logger named <span class="quote">“<span class="quote">Resolver</span>”</span>. This
+ configuration will then be used by the loggers in the
+ Resolver module, and all the libraries used by it.
+ </p><p>
+
+ If you want to specify logging for one specific library
+ within the module, you set the name to
+ <em class="replaceable"><code>module.library</code></em>. For example, the
+ logger used by the nameserver address store component
+ has the full name of <span class="quote">“<span class="quote">Resolver.nsas</span>”</span>. If
+ there is no entry in Logging for a particular library,
+ it will use the configuration given for the module.
+
+
+
+ </p><p>
+
+
+
+ To illustrate this, suppose you want the cache library
+ to log messages of severity DEBUG, and the rest of the
+ resolver code to log messages of severity INFO. To achieve
+ this you specify two loggers, one with the name
+ <span class="quote">“<span class="quote">Resolver</span>”</span> and severity INFO, and one with
+ the name <span class="quote">“<span class="quote">Resolver.cache</span>”</span> with severity
+ DEBUG. As there are no entries for other libraries (e.g.
+ the nsas), they will use the configuration for the module
+ (<span class="quote">“<span class="quote">Resolver</span>”</span>), so giving the desired behavior.
+
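+       A sketch of this two-logger setup via
+       <span class="command"><strong>bindctl</strong></span> (the index
+       numbers assume an initially empty loggers list; adjust them to
+       your configuration):
+       </p><pre class="screen">
+> <strong class="userinput"><code>config add Logging/loggers</code></strong>
+> <strong class="userinput"><code>config set Logging/loggers[0]/name Resolver</code></strong>
+> <strong class="userinput"><code>config set Logging/loggers[0]/severity INFO</code></strong>
+> <strong class="userinput"><code>config add Logging/loggers</code></strong>
+> <strong class="userinput"><code>config set Logging/loggers[1]/name Resolver.cache</code></strong>
+> <strong class="userinput"><code>config set Logging/loggers[1]/severity DEBUG</code></strong>
+> <strong class="userinput"><code>config commit</code></strong>
+</pre><p>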
+ </p><p>
+
+ One special case is that of a module name of <span class="quote">“<span class="quote">*</span>”</span>
+       (asterisk), which is interpreted as <span class="emphasis"><em>any</em></span>
+ module. You can set global logging options by using this,
+ including setting the logging configuration for a library
+ that is used by multiple modules (e.g. <span class="quote">“<span class="quote">*.config</span>”</span>
+ specifies the configuration library code in whatever
+ module is using it).
+
+ </p><p>
+
+ If there are multiple logger specifications in the
+ configuration that might match a particular logger, the
+ specification with the more specific logger name takes
+       precedence. For example, if there are entries for
+ both <span class="quote">“<span class="quote">*</span>”</span> and <span class="quote">“<span class="quote">Resolver</span>”</span>, the
+ resolver module — and all libraries it uses —
+ will log messages according to the configuration in the
+ second entry (<span class="quote">“<span class="quote">Resolver</span>”</span>). All other modules
+ will use the configuration of the first entry
+ (<span class="quote">“<span class="quote">*</span>”</span>). If there was also a configuration
+ entry for <span class="quote">“<span class="quote">Resolver.cache</span>”</span>, the cache library
+ within the resolver would use that in preference to the
+ entry for <span class="quote">“<span class="quote">Resolver</span>”</span>.
+
+ </p><p>
+
+ One final note about the naming. When specifying the
+ module name within a logger, use the name of the module
+ as specified in <span class="command"><strong>bindctl</strong></span>, e.g.
+ <span class="quote">“<span class="quote">Resolver</span>”</span> for the resolver module,
+ <span class="quote">“<span class="quote">Xfrout</span>”</span> for the xfrout module, etc. When
+ the message is logged, the message will include the name
+ of the logger generating the message, but with the module
+ name replaced by the name of the process implementing
+ the module (so for example, a message generated by the
+ <span class="quote">“<span class="quote">Auth.cache</span>”</span> logger will appear in the output
+ with a logger name of <span class="quote">“<span class="quote">b10-auth.cache</span>”</span>).
+
+ </p></div><div class="section" title="severity (string)"><div class="titlepage"><div><div><h4 class="title"><a name="id1168229447923"></a>severity (string)</h4></div></div></div><p>
+
+ This specifies the category of messages logged.
+ Each message is logged with an associated severity which
+ may be one of the following (in descending order of
+ severity):
+ </p><div class="itemizedlist"><ul class="itemizedlist" type="disc"><li class="listitem"> FATAL </li><li class="listitem"> ERROR </li><li class="listitem"> WARN </li><li class="listitem"> INFO </li><li class="listitem"> DEBUG </li></ul></div><p>
+
+ When the severity of a logger is set to one of these
+ values, it will only log messages of that severity, and
+ the severities above it. The severity may also be set to
+ NONE, in which case all messages from that logger are
+ inhibited.
+
+
+
+ </p></div><div class="section" title="output_options (list)"><div class="titlepage"><div><div><h4 class="title"><a name="id1168229447973"></a>output_options (list)</h4></div></div></div><p>
+
+ Each logger can have zero or more
+ <code class="option">output_options</code>. These specify where log
+ messages are sent to. These are explained in detail below.
+
+ </p><p>
+
+ The other options for a logger are:
+
+ </p></div><div class="section" title="debuglevel (integer)"><div class="titlepage"><div><div><h4 class="title"><a name="id1168229447990"></a>debuglevel (integer)</h4></div></div></div><p>
+
+ When a logger's severity is set to DEBUG, this value
+ specifies what debug messages should be printed. It ranges
+ from 0 (least verbose) to 99 (most verbose).
+ </p><p>
+
+ If severity for the logger is not DEBUG, this value is ignored.
+
+ </p></div><div class="section" title="additive (true or false)"><div class="titlepage"><div><div><h4 class="title"><a name="id1168229448005"></a>additive (true or false)</h4></div></div></div><p>
+
+ If this is true, the <code class="option">output_options</code> from
+ the parent will be used. For example, if there are two
+ loggers configured; <span class="quote">“<span class="quote">Resolver</span>”</span> and
+ <span class="quote">“<span class="quote">Resolver.cache</span>”</span>, and <code class="option">additive</code>
+ is true in the second, it will write the log messages
+ not only to the destinations specified for
+ <span class="quote">“<span class="quote">Resolver.cache</span>”</span>, but also to the destinations
+ as specified in the <code class="option">output_options</code> in
+ the logger named <span class="quote">“<span class="quote">Resolver</span>”</span>.
+
+
+
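+       A minimal sketch, assuming logger <em class="replaceable"><code>1</code></em>
+       is the one named <span class="quote">“<span class="quote">Resolver.cache</span>”</span>
+       (replace the index as needed):
+       </p><pre class="screen">
+> <strong class="userinput"><code>config set Logging/loggers[<em class="replaceable"><code>1</code></em>]/additive true</code></strong>
+> <strong class="userinput"><code>config commit</code></strong>
+</pre><p>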
+ </p></div></div><div class="section" title="Output Options"><div class="titlepage"><div><div><h3 class="title"><a name="id1168229448040"></a>Output Options</h3></div></div></div><p>
+
+ The main settings for an output option are the
+ <code class="option">destination</code> and a value called
+ <code class="option">output</code>, the meaning of which depends on
+ the destination that is set.
+
+ </p><div class="section" title="destination (string)"><div class="titlepage"><div><div><h4 class="title"><a name="id1168229448056"></a>destination (string)</h4></div></div></div><p>
+
+ The destination is the type of output. It can be one of:
+
+ </p><div class="itemizedlist"><ul class="itemizedlist" type="disc"><li class="listitem"> console </li><li class="listitem"> file </li><li class="listitem"> syslog </li></ul></div></div><div class="section" title="output (string)"><div class="titlepage"><div><div><h4 class="title"><a name="id1168229448088"></a>output (string)</h4></div></div></div><p>
+
+ Depending on what is set as the output destination, this
+ value is interpreted as follows:
+
+ </p><div class="variablelist"><dl><dt><span class="term"><code class="option">destination</code> is <span class="quote">“<span class="quote">console</span>”</span></span></dt><dd>
+ The value of output must be one of <span class="quote">“<span class="quote">stdout</span>”</span>
+ (messages printed to standard output) or
+ <span class="quote">“<span class="quote">stderr</span>”</span> (messages printed to standard
+ error).
+ </dd><dt><span class="term"><code class="option">destination</code> is <span class="quote">“<span class="quote">file</span>”</span></span></dt><dd>
+ The value of output is interpreted as a file name;
+ log messages will be appended to this file.
+ </dd><dt><span class="term"><code class="option">destination</code> is <span class="quote">“<span class="quote">syslog</span>”</span></span></dt><dd>
+ The value of output is interpreted as the
+ <span class="command"><strong>syslog</strong></span> facility (e.g.
+ <span class="emphasis"><em>local0</em></span>) that should be used
+ for log messages.
+ </dd></dl></div><p>
+
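+       For instance, a sketch that sends a logger's messages to
+       <span class="command"><strong>syslog</strong></span> using the
+       <span class="emphasis"><em>local0</em></span> facility (the index
+       numbers are assumptions):
+       </p><pre class="screen">
+> <strong class="userinput"><code>config set Logging/loggers[0]/output_options[0]/destination syslog</code></strong>
+> <strong class="userinput"><code>config set Logging/loggers[0]/output_options[0]/output local0</code></strong>
+> <strong class="userinput"><code>config commit</code></strong>
+</pre><p>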
+ The other options for <code class="option">output_options</code> are:
+
+ </p><div class="section" title="flush (true of false)"><div class="titlepage"><div><div><h5 class="title"><a name="id1168229448172"></a>flush (true of false)</h5></div></div></div><p>
+ Flush buffers after each log message. Doing this will
+ reduce performance but will ensure that if the program
+ terminates abnormally, all messages up to the point of
+ termination are output.
+ </p></div><div class="section" title="maxsize (integer)"><div class="titlepage"><div><div><h5 class="title"><a name="id1168229448182"></a>maxsize (integer)</h5></div></div></div><p>
+          Only relevant when the destination is a file, this is the
+          maximum size of an output file in bytes. When the maximum
+ size is reached, the file is renamed and a new file opened.
+ (For example, a ".1" is appended to the name —
+ if a ".1" file exists, it is renamed ".2",
+ etc.)
+ </p><p>
+ If this is 0, no maximum file size is used.
+ </p></div><div class="section" title="maxver (integer)"><div class="titlepage"><div><div><h5 class="title"><a name="id1168229448196"></a>maxver (integer)</h5></div></div></div><p>
+ Maximum number of old log files to keep around when
+ rolling the output file. Only relevant when
+ <code class="option">destination</code> is <span class="quote">“<span class="quote">file</span>”</span>.
+ </p></div></div></div><div class="section" title="Example session"><div class="titlepage"><div><div><h3 class="title"><a name="id1168229448215"></a>Example session</h3></div></div></div><p>
+
+ In this example we want to set the global logging to
+       write to the file <code class="filename">/var/log/bind10.log</code>,
+ at severity WARN. We want the authoritative server to
+ log at DEBUG with debuglevel 40, to a different file
+ (<code class="filename">/tmp/debug_messages</code>).
+
+ </p><p>
+
+ Start <span class="command"><strong>bindctl</strong></span>.
+
+ </p><p>
+
+ </p><pre class="screen">["login success "]
+> <strong class="userinput"><code>config show Logging</code></strong>
+Logging/loggers [] list
+</pre><p>
+
+ </p><p>
+
+ By default, no specific loggers are configured, in which
+ case the severity defaults to INFO and the output is
+ written to stderr.
+
+ </p><p>
+
+ Let's first add a default logger:
+
+ </p><p>
+
+ </p><pre class="screen"><strong class="userinput"><code>> config add Logging/loggers</code></strong>
+> <strong class="userinput"><code>config show Logging</code></strong>
+Logging/loggers/ list (modified)
+</pre><p>
+
+ </p><p>
+
+ The loggers value line changed to indicate that it is no
+ longer an empty list:
+
+ </p><p>
+
+ </p><pre class="screen">> <strong class="userinput"><code>config show Logging/loggers</code></strong>
+Logging/loggers[0]/name "" string (default)
+Logging/loggers[0]/severity "INFO" string (default)
+Logging/loggers[0]/debuglevel 0 integer (default)
+Logging/loggers[0]/additive false boolean (default)
+Logging/loggers[0]/output_options [] list (default)
+</pre><p>
+
+ </p><p>
+
+       The name is mandatory, so we must set it. We will also
+       change the severity. Let's start with the global
+ logger.
+
+ </p><p>
+
+ </p><pre class="screen">> <strong class="userinput"><code>config set Logging/loggers[0]/name *</code></strong>
+> <strong class="userinput"><code>config set Logging/loggers[0]/severity WARN</code></strong>
+> <strong class="userinput"><code>config show Logging/loggers</code></strong>
+Logging/loggers[0]/name "*" string (modified)
+Logging/loggers[0]/severity "WARN" string (modified)
+Logging/loggers[0]/debuglevel 0 integer (default)
+Logging/loggers[0]/additive false boolean (default)
+Logging/loggers[0]/output_options [] list (default)
+</pre><p>
+
+ </p><p>
+
+ Of course, we need to specify where we want the log
+ messages to go, so we add an entry for an output option.
+
+ </p><p>
+
+ </p><pre class="screen">> <strong class="userinput"><code> config add Logging/loggers[0]/output_options</code></strong>
+> <strong class="userinput"><code> config show Logging/loggers[0]/output_options</code></strong>
+Logging/loggers[0]/output_options[0]/destination "console" string (default)
+Logging/loggers[0]/output_options[0]/output "stdout" string (default)
+Logging/loggers[0]/output_options[0]/flush false boolean (default)
+Logging/loggers[0]/output_options[0]/maxsize 0 integer (default)
+Logging/loggers[0]/output_options[0]/maxver 0 integer (default)
+</pre><p>
+
+
+ </p><p>
+
+ These aren't the values we are looking for.
+
+ </p><p>
+
+ </p><pre class="screen">> <strong class="userinput"><code> config set Logging/loggers[0]/output_options[0]/destination file</code></strong>
+> <strong class="userinput"><code> config set Logging/loggers[0]/output_options[0]/output /var/log/bind10.log</code></strong>
+> <strong class="userinput"><code> config set Logging/loggers[0]/output_options[0]/maxsize 30000</code></strong>
+> <strong class="userinput"><code> config set Logging/loggers[0]/output_options[0]/maxver 8</code></strong>
+</pre><p>
+
+ </p><p>
+
+ Which would make the entire configuration for this logger
+ look like:
+
+ </p><p>
+
+ </p><pre class="screen">> <strong class="userinput"><code> config show all Logging/loggers</code></strong>
+Logging/loggers[0]/name "*" string (modified)
+Logging/loggers[0]/severity "WARN" string (modified)
+Logging/loggers[0]/debuglevel 0 integer (default)
+Logging/loggers[0]/additive false boolean (default)
+Logging/loggers[0]/output_options[0]/destination "file" string (modified)
+Logging/loggers[0]/output_options[0]/output "/var/log/bind10.log" string (modified)
+Logging/loggers[0]/output_options[0]/flush false boolean (default)
+Logging/loggers[0]/output_options[0]/maxsize 30000 integer (modified)
+Logging/loggers[0]/output_options[0]/maxver 8 integer (modified)
+</pre><p>
+
+ </p><p>
+
+ That looks OK, so let's commit it before we add the
+ configuration for the authoritative server's logger.
+
+ </p><p>
+
+ </p><pre class="screen">> <strong class="userinput"><code> config commit</code></strong></pre><p>
+
+ </p><p>
+
+ Now that we have set it, and checked each value along
+ the way, adding a second entry is quite similar.
+
+ </p><p>
+
+ </p><pre class="screen">> <strong class="userinput"><code> config add Logging/loggers</code></strong>
+> <strong class="userinput"><code> config set Logging/loggers[1]/name Auth</code></strong>
+> <strong class="userinput"><code> config set Logging/loggers[1]/severity DEBUG</code></strong>
+> <strong class="userinput"><code> config set Logging/loggers[1]/debuglevel 40</code></strong>
+> <strong class="userinput"><code> config add Logging/loggers[1]/output_options</code></strong>
+> <strong class="userinput"><code> config set Logging/loggers[1]/output_options[0]/destination file</code></strong>
+> <strong class="userinput"><code> config set Logging/loggers[1]/output_options[0]/output /tmp/auth_debug.log</code></strong>
+> <strong class="userinput"><code> config commit</code></strong>
+</pre><p>
+
+ </p><p>
+
+ And that's it. Once we have found whatever it was we
+ needed the debug messages for, we can simply remove the
+ second logger to let the authoritative server use the
+ same settings as the rest.
+
+ </p><p>
+
+ </p><pre class="screen">> <strong class="userinput"><code> config remove Logging/loggers[1]</code></strong>
+> <strong class="userinput"><code> config commit</code></strong>
+</pre><p>
+
+ </p><p>
+
+ And every module will now be using the values from the
+ logger named <span class="quote">“<span class="quote">*</span>”</span>.
+
+ </p></div></div><div class="section" title="Logging Message Format"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229448428"></a>Logging Message Format</h2></div></div></div><p>
+ Each message written by BIND 10 to the configured logging
+ destinations comprises a number of components that identify
+ the origin of the message and, if the message indicates
+ a problem, information about the problem that may be
+ useful in fixing it.
+ </p><p>
+ Consider the message below logged to a file:
+ </p><pre class="screen">2011-06-15 13:48:22.034 ERROR [b10-resolver.asiolink]
ASIODNS_OPENSOCK error 111 opening TCP socket to 127.0.0.1(53)</pre><p>
- </p><p>
- Note: the layout of messages written to the system logging
- file (syslog) may be slightly different. This message has
- been split across two lines here for display reasons; in the
- logging file, it will appear on one line.)
- </p><p>
- The log message comprises a number of components:
-
- </p><div class="variablelist"><dl><dt><span class="term">2011-06-15 13:48:22.034</span></dt><dd><p>
- The date and time at which the message was generated.
- </p></dd><dt><span class="term">ERROR</span></dt><dd><p>
- The severity of the message.
- </p></dd><dt><span class="term">[b10-resolver.asiolink]</span></dt><dd><p>
- The source of the message. This comprises two components:
- the BIND 10 process generating the message (in this
- case, <span class="command"><strong>b10-resolver</strong></span>) and the module
- within the program from which the message originated
- (which in the example is the asynchronous I/O link
- module, asiolink).
- </p></dd><dt><span class="term">ASIODNS_OPENSOCK</span></dt><dd><p>
+ </p><p>
+ Note: the layout of messages written to the system logging
+ file (syslog) may be slightly different. This message has
+ been split across two lines here for display reasons; in the
+       logging file, it will appear on one line.
+ </p><p>
+ The log message comprises a number of components:
+
+ </p><div class="variablelist"><dl><dt><span class="term">2011-06-15 13:48:22.034</span></dt><dd><p>
+ The date and time at which the message was generated.
+ </p></dd><dt><span class="term">ERROR</span></dt><dd><p>
+ The severity of the message.
+ </p></dd><dt><span class="term">[b10-resolver.asiolink]</span></dt><dd><p>
+ The source of the message. This comprises two components:
+ the BIND 10 process generating the message (in this
+ case, <span class="command"><strong>b10-resolver</strong></span>) and the module
+ within the program from which the message originated
+ (which in the example is the asynchronous I/O link
+ module, asiolink).
+ </p></dd><dt><span class="term">ASIODNS_OPENSOCK</span></dt><dd><p>
The message identification. Every message in BIND 10
has a unique identification, which can be used as an
index into the <a class="ulink" href="bind10-messages.html" target="_top"><em class="citetitle">BIND 10 Messages
Manual</em></a> (<a class="ulink" href="http://bind10.isc.org/docs/bind10-messages.html" target="_top">http://bind10.isc.org/docs/bind10-messages.html</a>) from which more information can be obtained.
- </p></dd><dt><span class="term">error 111 opening TCP socket to 127.0.0.1(53)</span></dt><dd><p>
- A brief description of the cause of the problem. Within this text,
- information relating to the condition that caused the message to
- be logged will be included. In this example, error number 111
- (an operating system-specific error number) was encountered when
- trying to open a TCP connection to port 53 on the local system
- (address 127.0.0.1). The next step would be to find out the reason
- for the failure by consulting your system's documentation to
- identify what error number 111 means.
- </p></dd></dl></div><p>
-
- </p></div></div></body></html>
+ </p></dd><dt><span class="term">error 111 opening TCP socket to 127.0.0.1(53)</span></dt><dd><p>
+ A brief description of the cause of the problem.
+ Within this text, information relating to the condition
+ that caused the message to be logged will be included.
+ In this example, error number 111 (an operating
+ system-specific error number) was encountered when
+ trying to open a TCP connection to port 53 on the
+ local system (address 127.0.0.1). The next step
+ would be to find out the reason for the failure by
+ consulting your system's documentation to identify
+ what error number 111 means.
+ </p></dd></dl></div><p>
+ </p></div></div></div></body></html>
diff --git a/doc/guide/bind10-guide.xml b/doc/guide/bind10-guide.xml
index 6a42182..d34746b 100644
--- a/doc/guide/bind10-guide.xml
+++ b/doc/guide/bind10-guide.xml
@@ -146,7 +146,7 @@
The processes started by the <command>bind10</command>
command have names starting with "b10-", including:
</para>
-
+
<para>
<itemizedlist>
@@ -241,7 +241,7 @@
<section id="managing_once_running">
<title>Managing BIND 10</title>
-
+
<para>
Once BIND 10 is running, a few commands are used to interact
directly with the system:
@@ -280,7 +280,7 @@
<!-- TODO point to these -->
In addition, manual pages are also provided in the default installation.
</para>
-
+
<!--
bin/
bindctl*
@@ -387,7 +387,7 @@ Debian and Ubuntu:
</para>
<orderedlist>
-
+
<listitem>
<simpara>
Install required build dependencies.
@@ -471,7 +471,7 @@ Debian and Ubuntu:
Downloading a release tar file is the recommended method to
obtain the source code.
</para>
-
+
<para>
The BIND 10 releases are available as tar file downloads from
<ulink url="ftp://ftp.isc.org/isc/bind10/"/>.
@@ -547,37 +547,37 @@ Debian and Ubuntu:
<varlistentry>
<term>--prefix</term>
<listitem>
- <simpara>Define the the installation location (the
+ <simpara>Define the installation location (the
default is <filename>/usr/local/</filename>).
</simpara>
- </listitem>
+ </listitem>
</varlistentry>
<varlistentry>
<term>--with-boost-include</term>
- <listitem>
+ <listitem>
<simpara>Define the path to find the Boost headers.
</simpara>
- </listitem>
+ </listitem>
</varlistentry>
<varlistentry>
<term>--with-pythonpath</term>
- <listitem>
+ <listitem>
<simpara>Define the path to Python 3.1 if it is not in the
standard execution path.
</simpara>
- </listitem>
+ </listitem>
</varlistentry>
<varlistentry>
<term>--with-gtest</term>
- <listitem>
+ <listitem>
<simpara>Enable building the C++ Unit Tests using the
Google Tests framework. Optionally this can define the
path to the gtest header files and library.
</simpara>
- </listitem>
+ </listitem>
</varlistentry>
</variablelist>
@@ -696,13 +696,13 @@ Debian and Ubuntu:
</para>
</section>
-->
-
+
</chapter>
<chapter id="bind10">
<title>Starting BIND10 with <command>bind10</command></title>
<para>
- BIND 10 provides the <command>bind10</command> command which
+ BIND 10 provides the <command>bind10</command> command which
starts up the required processes.
<command>bind10</command>
will also restart processes that exit unexpectedly.
@@ -711,7 +711,7 @@ Debian and Ubuntu:
<para>
After starting the <command>b10-msgq</command> communications channel,
- <command>bind10</command> connects to it,
+ <command>bind10</command> connects to it,
runs the configuration manager, and reads its own configuration.
Then it starts the other modules.
</para>
@@ -742,6 +742,16 @@ Debian and Ubuntu:
get additional debugging or diagnostic output.
</para>
<!-- TODO: note it doesn't go into background -->
+
+ <note>
+ <para>
+ If the setproctitle Python module is detected at start up,
+ the process names for the Python-based daemons will be renamed
+ to better identify them instead of just <quote>python</quote>.
+ This is not needed on some operating systems.
+ </para>
+ </note>
+
</section>
</chapter>
@@ -769,7 +779,7 @@ Debian and Ubuntu:
<command>b10-msgq</command> service.
It listens on 127.0.0.1.
</para>
-
+
<!-- TODO: this is broken, see Trac #111
<para>
To select an alternate port for the <command>b10-msgq</command> to
@@ -1095,10 +1105,10 @@ since we used bind10 -->
The configuration data item is:
<variablelist>
-
+
<varlistentry>
<term>database_file</term>
- <listitem>
+ <listitem>
<simpara>This is an optional string to define the path to find
the SQLite3 database file.
<!-- TODO: -->
@@ -1120,7 +1130,7 @@ This may be a temporary setting until then.
<varlistentry>
<term>shutdown</term>
- <listitem>
+ <listitem>
<simpara>Stop the authoritative DNS server.
</simpara>
<!-- TODO: what happens when this is sent, will bind10 restart? -->
@@ -1176,7 +1186,7 @@ This may be a temporary setting until then.
<varlistentry>
<term>$INCLUDE</term>
- <listitem>
+ <listitem>
<simpara>Loads an additional zone file. This may be recursive.
</simpara>
</listitem>
@@ -1184,7 +1194,7 @@ This may be a temporary setting until then.
<varlistentry>
<term>$ORIGIN</term>
- <listitem>
+ <listitem>
<simpara>Defines the relative domain name.
</simpara>
</listitem>
@@ -1192,7 +1202,7 @@ This may be a temporary setting until then.
<varlistentry>
<term>$TTL</term>
- <listitem>
+ <listitem>
<simpara>Defines the time-to-live value used for following
records that don't include a TTL.
</simpara>
@@ -1257,7 +1267,7 @@ TODO
<note><simpara>
The current development release of BIND 10 only supports
- AXFR. (IXFR is not supported.)
+ AXFR. (IXFR is not supported.)
<!-- TODO: sqlite3 data source only? -->
@@ -1304,7 +1314,7 @@ what if a NOTIFY is sent?
<note><simpara>
The current development release of BIND 10 only supports
- AXFR. (IXFR is not supported.)
+ AXFR. (IXFR is not supported.)
Access control is not yet provided.
</simpara></note>
@@ -1360,7 +1370,7 @@ what is XfroutClient xfr_client??
<para>
The main <command>bind10</command> process can be configured
- to select to run either the authoritative or resolver.
+      to run either the authoritative server or the resolver, or both.
By default, it starts the authoritative service.
<!-- TODO: later both -->
@@ -1380,16 +1390,85 @@ what is XfroutClient xfr_client??
</para>
<para>
- The resolver also needs to be configured to listen on an address
- and port:
+ By default, the resolver listens on port 53 for 127.0.0.1 and ::1.
+ The following example shows how it can be configured to
+ listen on an additional address (and port):
<screen>
-> <userinput>config set Resolver/listen_on [{ "address": "127.0.0.1", "port": 53 }]</userinput>
+> <userinput>config add Resolver/listen_on</userinput>
+> <userinput>config set Resolver/listen_on[<replaceable>2</replaceable>]/address "192.168.1.1"</userinput>
+> <userinput>config set Resolver/listen_on[<replaceable>2</replaceable>]/port 53</userinput>
> <userinput>config commit</userinput>
</screen>
</para>
-<!-- TODO: later the above will have some defaults -->
+ <simpara>(Replace the <quote><replaceable>2</replaceable></quote>
+ as needed; run <quote><userinput>config show
+ Resolver/listen_on</userinput></quote> if needed.)</simpara>
+<!-- TODO: this example should not include the port, ticket #1185 -->
+
+ <section>
+ <title>Access Control</title>
+
+ <para>
+ By default, the <command>b10-resolver</command> daemon only accepts
+ DNS queries from the localhost (127.0.0.1 and ::1).
+ The <option>Resolver/query_acl</option> configuration may
+ be used to reject, drop, or allow specific IPs or networks.
+        This configuration list is first-match: the first entry that
+        matches the query determines the action taken.
+ </para>
+
+ <para>
+ The configuration's <option>action</option> item may be
+ set to <quote>ACCEPT</quote> to allow the incoming query,
+ <quote>REJECT</quote> to respond with a DNS REFUSED return
+ code, or <quote>DROP</quote> to ignore the query without
+ any response (such as a blackhole). For more information,
+ see the respective debugging messages: <ulink
+ url="bind10-messages.html#RESOLVER_QUERY_ACCEPTED">RESOLVER_QUERY_ACCEPTED</ulink>,
+ <ulink
+ url="bind10-messages.html#RESOLVER_QUERY_REJECTED">RESOLVER_QUERY_REJECTED</ulink>,
+ and <ulink
+url="bind10-messages.html#RESOLVER_QUERY_DROPPED">RESOLVER_QUERY_DROPPED</ulink>.
+ </para>
+
+ <para>
+ The required configuration's <option>from</option> item is set
+        to an IPv4 or IPv6 address, an address with a network mask, or to
+ the special lowercase keywords <quote>any6</quote> (for
+ any IPv6 address) or <quote>any4</quote> (for any IPv4
+ address).
+ </para>
+
+<!-- TODO:
+/0 is for any address in that address family
+does that need any address too?
+
+TODO: tsig
+-->
+
+ <para>
+        For example, to allow the <replaceable>192.168.1.0/24</replaceable>
+ network to use your recursive name server, at the
+ <command>bindctl</command> prompt run:
+ </para>
+
+ <screen>
+> <userinput>config add Resolver/query_acl</userinput>
+> <userinput>config set Resolver/query_acl[<replaceable>2</replaceable>]/action "ACCEPT"</userinput>
+> <userinput>config set Resolver/query_acl[<replaceable>2</replaceable>]/from "<replaceable>192.168.1.0/24</replaceable>"</userinput>
+> <userinput>config commit</userinput>
+</screen>
+
+ <simpara>(Replace the <quote><replaceable>2</replaceable></quote>
+ as needed; run <quote><userinput>config show
+ Resolver/query_acl</userinput></quote> if needed.)</simpara>
+
+<!-- TODO: check this -->
+ <note><simpara>This prototype access control configuration
+ syntax may be changed.</simpara></note>
+
+ </section>
<section>
<title>Forwarding</title>
@@ -1470,61 +1549,679 @@ then change those defaults with config set Resolver/forward_addresses[0]/address
<chapter id="logging">
<title>Logging</title>
-<!-- TODO: how to configure logging, logging destinations etc. -->
+ <section>
+ <title>Logging configuration</title>
- <para>
- Each message written by BIND 10 to the configured logging destinations
- comprises a number of components that identify the origin of the
- message and, if the message indicates a problem, information about the
- problem that may be useful in fixing it.
- </para>
+ <para>
- <para>
- Consider the message below logged to a file:
- <screen>2011-06-15 13:48:22.034 ERROR [b10-resolver.asiolink]
- ASIODNS_OPENSOCK error 111 opening TCP socket to 127.0.0.1(53)</screen>
- </para>
+ The logging system in BIND 10 is configured through the
+ Logging module. All BIND 10 modules will look at the
+ configuration in Logging to see what should be logged and
+ to where.
- <para>
- Note: the layout of messages written to the system logging
- file (syslog) may be slightly different. This message has
- been split across two lines here for display reasons; in the
- logging file, it will appear on one line.)
- </para>
+<!-- TODO: what is context of Logging module for readers of this guide? -->
- <para>
- The log message comprises a number of components:
+ </para>
+
+ <section>
+ <title>Loggers</title>
+
+ <para>
+
+ Within BIND 10, a message is logged through a component
+ called a "logger". Different parts of BIND 10 log messages
+ through different loggers, and each logger can be configured
+        independently of the others.
+
+ </para>
+
+ <para>
+
+ In the Logging module, you can specify the configuration
+ for zero or more loggers; any that are not specified will
+        take appropriate default values.
+
+ </para>
+
+ <para>
+
+ The three most important elements of a logger configuration
+ are the <option>name</option> (the component that is
+ generating the messages), the <option>severity</option>
+ (what to log), and the <option>output_options</option>
+ (where to log).
+
+ </para>
+
+ <section>
+ <title>name (string)</title>
+
+ <para>
+ Each logger in the system has a name, the name being that
+ of the component using it to log messages. For instance,
+ if you want to configure logging for the resolver module,
+ you add an entry for a logger named <quote>Resolver</quote>. This
+ configuration will then be used by the loggers in the
+ Resolver module, and all the libraries used by it.
+ </para>
+
+<!-- TODO: later we will have a way to know names of all modules
+
+Right now you can only see what their names are if they are running
+(a simple 'help' without anything else in bindctl for instance).
+
+ -->
+
+ <para>
+
+ If you want to specify logging for one specific library
+ within the module, you set the name to
+ <replaceable>module.library</replaceable>. For example, the
+ logger used by the nameserver address store component
+ has the full name of <quote>Resolver.nsas</quote>. If
+ there is no entry in Logging for a particular library,
+ it will use the configuration given for the module.
+
+<!-- TODO: how to know these specific names?
+
+We will either have to document them or tell the administrator to
+specify module-wide logging and see what appears...
+
+-->
+
+ </para>
+
+ <para>
+
+<!-- TODO: severity has not been covered yet -->
+
+ To illustrate this, suppose you want the cache library
+ to log messages of severity DEBUG, and the rest of the
+ resolver code to log messages of severity INFO. To achieve
+ this you specify two loggers, one with the name
+ <quote>Resolver</quote> and severity INFO, and one with
+ the name <quote>Resolver.cache</quote> with severity
+ DEBUG. As there are no entries for other libraries (e.g.
+ the nsas), they will use the configuration for the module
+ (<quote>Resolver</quote>), so giving the desired behavior.
+
+ </para>
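+
+        <para>
+          A sketch of this two-logger setup via
+          <command>bindctl</command> (the index numbers assume an
+          initially empty loggers list; adjust them to your
+          configuration):
+        </para>
+
+        <screen>
+> <userinput>config add Logging/loggers</userinput>
+> <userinput>config set Logging/loggers[0]/name Resolver</userinput>
+> <userinput>config set Logging/loggers[0]/severity INFO</userinput>
+> <userinput>config add Logging/loggers</userinput>
+> <userinput>config set Logging/loggers[1]/name Resolver.cache</userinput>
+> <userinput>config set Logging/loggers[1]/severity DEBUG</userinput>
+> <userinput>config commit</userinput>
+</screen>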
+
+ <para>
+
+ One special case is that of a module name of <quote>*</quote>
+        (asterisk), which is interpreted as <emphasis>any</emphasis>
+ module. You can set global logging options by using this,
+ including setting the logging configuration for a library
+ that is used by multiple modules (e.g. <quote>*.config</quote>
+ specifies the configuration library code in whatever
+ module is using it).
+
+ </para>
+
+ <para>
+
+ If there are multiple logger specifications in the
+ configuration that might match a particular logger, the
+ specification with the more specific logger name takes
+        precedence. For example, if there are entries for
+ both <quote>*</quote> and <quote>Resolver</quote>, the
+ resolver module — and all libraries it uses —
+ will log messages according to the configuration in the
+ second entry (<quote>Resolver</quote>). All other modules
+ will use the configuration of the first entry
+ (<quote>*</quote>). If there was also a configuration
+ entry for <quote>Resolver.cache</quote>, the cache library
+ within the resolver would use that in preference to the
+ entry for <quote>Resolver</quote>.
+
+ </para>
+
+ <para>
+
+ One final note about the naming. When specifying the
+ module name within a logger, use the name of the module
+ as specified in <command>bindctl</command>, e.g.
+ <quote>Resolver</quote> for the resolver module,
+ <quote>Xfrout</quote> for the xfrout module, etc. When
+ the message is logged, the message will include the name
+ of the logger generating the message, but with the module
+ name replaced by the name of the process implementing
+ the module (so for example, a message generated by the
+ <quote>Auth.cache</quote> logger will appear in the output
+ with a logger name of <quote>b10-auth.cache</quote>).
+
+ </para>
+
+ </section>
+
+ <section>
+ <title>severity (string)</title>
+
+ <para>
+
+ This specifies the category of messages logged.
+ Each message is logged with an associated severity which
+ may be one of the following (in descending order of
+ severity):
+ </para>
+
+ <itemizedlist>
+ <listitem>
+ <simpara> FATAL </simpara>
+ </listitem>
+
+ <listitem>
+ <simpara> ERROR </simpara>
+ </listitem>
+
+ <listitem>
+ <simpara> WARN </simpara>
+ </listitem>
+
+ <listitem>
+ <simpara> INFO </simpara>
+ </listitem>
+
+ <listitem>
+ <simpara> DEBUG </simpara>
+ </listitem>
+ </itemizedlist>
+
+ <para>
+
+ When the severity of a logger is set to one of these
+ values, it will only log messages of that severity, and
+ the severities above it. The severity may also be set to
+ NONE, in which case all messages from that logger are
+ inhibited.
+
+<!-- TODO: worded wrong? If I set to INFO, why would it show DEBUG which is literally below in that list? -->
+
+ </para>
+
+ </section>
+
+ <section>
+ <title>output_options (list)</title>
+
+ <para>
+
+ Each logger can have zero or more
+ <option>output_options</option>. These specify where log
+ messages are sent to. These are explained in detail below.
+
+ </para>
+
+ <para>
+
+ The other options for a logger are:
+
+ </para>
+
+ </section>
+
+ <section>
+ <title>debuglevel (integer)</title>
+
+ <para>
+
+ When a logger's severity is set to DEBUG, this value
+ specifies what debug messages should be printed. It ranges
+ from 0 (least verbose) to 99 (most verbose).
+ </para>
+
+
+<!-- TODO: complete this sentence:
+
+ The general classification of debug message types is
+
+TODO; there's a ticket to determine these levels, see #1074
+
+ -->
+
+ <para>
+
+ If severity for the logger is not DEBUG, this value is ignored.
+
+ </para>
+
+ </section>
+
+ <section>
+ <title>additive (true or false)</title>
+
+ <para>
+
+ If this is true, the <option>output_options</option> from
+ the parent will be used. For example, if there are two
+ loggers configured; <quote>Resolver</quote> and
+ <quote>Resolver.cache</quote>, and <option>additive</option>
+ is true in the second, it will write the log messages
+ not only to the destinations specified for
+ <quote>Resolver.cache</quote>, but also to the destinations
+ as specified in the <option>output_options</option> in
+ the logger named <quote>Resolver</quote>.
+
+<!-- TODO: check this -->
+
+ </para>
+
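+        <para>
+          A minimal sketch, assuming logger <replaceable>1</replaceable>
+          is the one named <quote>Resolver.cache</quote> (replace the
+          index as needed):
+        </para>
+
+        <screen>
+> <userinput>config set Logging/loggers[<replaceable>1</replaceable>]/additive true</userinput>
+> <userinput>config commit</userinput>
+</screen>
+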
+ </section>
+
+ </section>
+
+ <section>
+ <title>Output Options</title>
+
+ <para>
+
+ The main settings for an output option are the
+ <option>destination</option> and a value called
+ <option>output</option>, the meaning of which depends on
+ the destination that is set.
+
+ </para>
+
+ <section>
+ <title>destination (string)</title>
+
+ <para>
+
+ The destination is the type of output. It can be one of:
+
+ </para>
+
+ <itemizedlist>
+
+ <listitem>
+ <simpara> console </simpara>
+ </listitem>
+
+ <listitem>
+ <simpara> file </simpara>
+ </listitem>
+
+ <listitem>
+ <simpara> syslog </simpara>
+ </listitem>
+
+ </itemizedlist>
+
+ </section>
+
+ <section>
+ <title>output (string)</title>
+
+ <para>
+
+ Depending on what is set as the output destination, this
+ value is interpreted as follows:
+
+ </para>
<variablelist>
- <varlistentry>
- <term>2011-06-15 13:48:22.034</term>
- <listitem><para>
- The date and time at which the message was generated.
- </para></listitem>
- </varlistentry>
-
- <varlistentry>
- <term>ERROR</term>
- <listitem><para>
- The severity of the message.
- </para></listitem>
- </varlistentry>
-
- <varlistentry>
- <term>[b10-resolver.asiolink]</term>
- <listitem><para>
- The source of the message. This comprises two components:
- the BIND 10 process generating the message (in this
- case, <command>b10-resolver</command>) and the module
- within the program from which the message originated
- (which in the example is the asynchronous I/O link
- module, asiolink).
- </para></listitem>
- </varlistentry>
-
- <varlistentry>
- <term>ASIODNS_OPENSOCK</term>
- <listitem><para>
+
+ <varlistentry>
+ <term><option>destination</option> is <quote>console</quote></term>
+ <listitem>
+ <simpara>
+ The value of output must be one of <quote>stdout</quote>
+ (messages printed to standard output) or
+ <quote>stderr</quote> (messages printed to standard
+ error).
+ </simpara>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term><option>destination</option> is <quote>file</quote></term>
+ <listitem>
+ <simpara>
+ The value of output is interpreted as a file name;
+ log messages will be appended to this file.
+ </simpara>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term><option>destination</option> is <quote>syslog</quote></term>
+ <listitem>
+ <simpara>
+ The value of output is interpreted as the
+ <command>syslog</command> facility (e.g.
+ <emphasis>local0</emphasis>) that should be used
+ for log messages.
+ </simpara>
+ </listitem>
+ </varlistentry>
+
+ </variablelist>
+
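+        <para>
+          For instance, a sketch that sends a logger's messages to
+          <command>syslog</command> using the <emphasis>local0</emphasis>
+          facility (the index numbers are assumptions):
+        </para>
+
+        <screen>
+> <userinput>config set Logging/loggers[0]/output_options[0]/destination syslog</userinput>
+> <userinput>config set Logging/loggers[0]/output_options[0]/output local0</userinput>
+> <userinput>config commit</userinput>
+</screen>
+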
+ <para>
+
+ The other options for <option>output_options</option> are:
+
+ </para>
+
+ <section>
+          <title>flush (true or false)</title>
+
+ <para>
+ Flush buffers after each log message. Doing this will
+ reduce performance but will ensure that if the program
+ terminates abnormally, all messages up to the point of
+ termination are output.
+ </para>
+
+ </section>
+
+ <section>
+ <title>maxsize (integer)</title>
+
+ <para>
+          Only relevant when the destination is a file, this is the
+          maximum size of an output file in bytes. When the maximum
+ size is reached, the file is renamed and a new file opened.
+ (For example, a ".1" is appended to the name —
+ if a ".1" file exists, it is renamed ".2",
+ etc.)
+ </para>
+
+ <para>
+ If this is 0, no maximum file size is used.
+ </para>
+
+ </section>
+
+ <section>
+ <title>maxver (integer)</title>
+
+ <para>
+ Maximum number of old log files to keep around when
+ rolling the output file. Only relevant when
+ <option>destination</option> is <quote>file</quote>.
+ </para>
+
+ </section>
+
+ </section>
+
+ </section>
+
+ <section>
+ <title>Example session</title>
+
+ <para>
+
+ In this example we want to set the global logging to
+        write to the file <filename>/var/log/bind10.log</filename>,
+ at severity WARN. We want the authoritative server to
+ log at DEBUG with debuglevel 40, to a different file
+        (<filename>/tmp/auth_debug.log</filename>).
+
+ </para>
+
+ <para>
+
+ Start <command>bindctl</command>.
+
+ </para>
+
+ <para>
+
+ <screen>["login success "]
+> <userinput>config show Logging</userinput>
+Logging/loggers [] list
+</screen>
+
+ </para>
+
+ <para>
+
+ By default, no specific loggers are configured, in which
+ case the severity defaults to INFO and the output is
+ written to stderr.
+
+ </para>
+
+ <para>
+
+ Let's first add a default logger:
+
+ </para>
+
+<!-- TODO: adding the empty loggers makes no sense -->
+ <para>
+
+ <screen><userinput>> config add Logging/loggers</userinput>
+> <userinput>config show Logging</userinput>
+Logging/loggers/ list (modified)
+</screen>
+
+ </para>
+
+ <para>
+
+ The loggers value line changed to indicate that it is no
+ longer an empty list:
+
+ </para>
+
+ <para>
+
+ <screen>> <userinput>config show Logging/loggers</userinput>
+Logging/loggers[0]/name "" string (default)
+Logging/loggers[0]/severity "INFO" string (default)
+Logging/loggers[0]/debuglevel 0 integer (default)
+Logging/loggers[0]/additive false boolean (default)
+Logging/loggers[0]/output_options [] list (default)
+</screen>
+
+ </para>
+
+ <para>
+
+        The name is mandatory, so we must set it. We will also
+        change the severity. Let's start with the global
+ logger.
+
+ </para>
+
+ <para>
+
+ <screen>> <userinput>config set Logging/loggers[0]/name *</userinput>
+> <userinput>config set Logging/loggers[0]/severity WARN</userinput>
+> <userinput>config show Logging/loggers</userinput>
+Logging/loggers[0]/name "*" string (modified)
+Logging/loggers[0]/severity "WARN" string (modified)
+Logging/loggers[0]/debuglevel 0 integer (default)
+Logging/loggers[0]/additive false boolean (default)
+Logging/loggers[0]/output_options [] list (default)
+</screen>
+
+ </para>
+
+ <para>
+
+ Of course, we need to specify where we want the log
+ messages to go, so we add an entry for an output option.
+
+ </para>
+
+ <para>
+
+ <screen>> <userinput> config add Logging/loggers[0]/output_options</userinput>
+> <userinput> config show Logging/loggers[0]/output_options</userinput>
+Logging/loggers[0]/output_options[0]/destination "console" string (default)
+Logging/loggers[0]/output_options[0]/output "stdout" string (default)
+Logging/loggers[0]/output_options[0]/flush false boolean (default)
+Logging/loggers[0]/output_options[0]/maxsize 0 integer (default)
+Logging/loggers[0]/output_options[0]/maxver 0 integer (default)
+</screen>
+
+
+ </para>
+
+ <para>
+
+ These aren't the values we are looking for.
+
+ </para>
+
+ <para>
+
+ <screen>> <userinput> config set Logging/loggers[0]/output_options[0]/destination file</userinput>
+> <userinput> config set Logging/loggers[0]/output_options[0]/output /var/log/bind10.log</userinput>
+> <userinput> config set Logging/loggers[0]/output_options[0]/maxsize 30000</userinput>
+> <userinput> config set Logging/loggers[0]/output_options[0]/maxver 8</userinput>
+</screen>
+
+ </para>
+
+ <para>
+
+ Which would make the entire configuration for this logger
+ look like:
+
+ </para>
+
+ <para>
+
+ <screen>> <userinput> config show all Logging/loggers</userinput>
+Logging/loggers[0]/name "*" string (modified)
+Logging/loggers[0]/severity "WARN" string (modified)
+Logging/loggers[0]/debuglevel 0 integer (default)
+Logging/loggers[0]/additive false boolean (default)
+Logging/loggers[0]/output_options[0]/destination "file" string (modified)
+Logging/loggers[0]/output_options[0]/output "/var/log/bind10.log" string (modified)
+Logging/loggers[0]/output_options[0]/flush false boolean (default)
+Logging/loggers[0]/output_options[0]/maxsize 30000 integer (modified)
+Logging/loggers[0]/output_options[0]/maxver 8 integer (modified)
+</screen>
+
+ </para>
+
+ <para>
+
+ That looks OK, so let's commit it before we add the
+ configuration for the authoritative server's logger.
+
+ </para>
+
+ <para>
+
+ <screen>> <userinput> config commit</userinput></screen>
+
+ </para>
+
+ <para>
+
+ Now that we have set it, and checked each value along
+ the way, adding a second entry is quite similar.
+
+ </para>
+
+ <para>
+
+ <screen>> <userinput> config add Logging/loggers</userinput>
+> <userinput> config set Logging/loggers[1]/name Auth</userinput>
+> <userinput> config set Logging/loggers[1]/severity DEBUG</userinput>
+> <userinput> config set Logging/loggers[1]/debuglevel 40</userinput>
+> <userinput> config add Logging/loggers[1]/output_options</userinput>
+> <userinput> config set Logging/loggers[1]/output_options[0]/destination file</userinput>
+> <userinput> config set Logging/loggers[1]/output_options[0]/output /tmp/auth_debug.log</userinput>
+> <userinput> config commit</userinput>
+</screen>
+
+ </para>
+
+ <para>
+
+ And that's it. Once we have found whatever it was we
+ needed the debug messages for, we can simply remove the
+ second logger to let the authoritative server use the
+ same settings as the rest.
+
+ </para>
+
+ <para>
+
+ <screen>> <userinput> config remove Logging/loggers[1]</userinput>
+> <userinput> config commit</userinput>
+</screen>
+
+ </para>
+
+ <para>
+
+ And every module will now be using the values from the
+ logger named <quote>*</quote>.
+
+ </para>
+
+ </section>
+
+ </section>
+
+ <section>
+ <title>Logging Message Format</title>
+
+ <para>
+ Each message written by BIND 10 to the configured logging
+ destinations comprises a number of components that identify
+ the origin of the message and, if the message indicates
+ a problem, information about the problem that may be
+ useful in fixing it.
+ </para>
+
+ <para>
+ Consider the message below logged to a file:
+ <screen>2011-06-15 13:48:22.034 ERROR [b10-resolver.asiolink]
+ ASIODNS_OPENSOCK error 111 opening TCP socket to 127.0.0.1(53)</screen>
+ </para>
+
+ <para>
+ Note: the layout of messages written to the system logging
+ file (syslog) may be slightly different. This message has
+ been split across two lines here for display reasons; in the
+        logging file, it will appear on one line.
+ </para>
+
+ <para>
+ The log message comprises a number of components:
+
+ <variablelist>
+ <varlistentry>
+ <term>2011-06-15 13:48:22.034</term>
+<!-- TODO: timestamp repeated even if using syslog? -->
+ <listitem><para>
+ The date and time at which the message was generated.
+ </para></listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term>ERROR</term>
+ <listitem><para>
+ The severity of the message.
+ </para></listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term>[b10-resolver.asiolink]</term>
+ <listitem><para>
+ The source of the message. This comprises two components:
+ the BIND 10 process generating the message (in this
+ case, <command>b10-resolver</command>) and the module
+ within the program from which the message originated
+ (which in the example is the asynchronous I/O link
+ module, asiolink).
+ </para></listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term>ASIODNS_OPENSOCK</term>
+ <listitem><para>
The message identification. Every message in BIND 10
has a unique identification, which can be used as an
index into the <ulink
@@ -1532,25 +2229,29 @@ then change those defaults with config set Resolver/forward_addresses[0]/address
Manual</citetitle></ulink> (<ulink
url="http://bind10.isc.org/docs/bind10-messages.html"
/>) from which more information can be obtained.
- </para></listitem>
- </varlistentry>
-
- <varlistentry>
- <term>error 111 opening TCP socket to 127.0.0.1(53)</term>
- <listitem><para>
- A brief description of the cause of the problem. Within this text,
- information relating to the condition that caused the message to
- be logged will be included. In this example, error number 111
- (an operating system-specific error number) was encountered when
- trying to open a TCP connection to port 53 on the local system
- (address 127.0.0.1). The next step would be to find out the reason
- for the failure by consulting your system's documentation to
- identify what error number 111 means.
- </para></listitem>
- </varlistentry>
- </variablelist>
+ </para></listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term>error 111 opening TCP socket to 127.0.0.1(53)</term>
+ <listitem><para>
+ A brief description of the cause of the problem.
+ Within this text, information relating to the condition
+ that caused the message to be logged will be included.
+ In this example, error number 111 (an operating
+ system-specific error number) was encountered when
+ trying to open a TCP connection to port 53 on the
+ local system (address 127.0.0.1). The next step
+ would be to find out the reason for the failure by
+ consulting your system's documentation to identify
+ what error number 111 means.
+ </para></listitem>
+ </varlistentry>
+ </variablelist>
+ </para>
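+
+      <para>
+        As an illustration (the command below is not part of BIND 10
+        itself and assumes a system with Python installed), an
+        operating-system error number such as the 111 above can
+        usually be translated into text with a one-liner like:
+        <screen>$ <userinput>python -c 'import os; print(os.strerror(111))'</userinput></screen>
+        On a typical Linux system this prints <quote>Connection
+        refused</quote>; the exact text and numbering are
+        system-dependent.
+      </para>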
+
+ </section>
- </para>
</chapter>
<!-- TODO: how to help: run unit tests, join lists, review trac tickets -->
diff --git a/doc/guide/bind10-messages.html b/doc/guide/bind10-messages.html
index b075e96..237b7ad 100644
--- a/doc/guide/bind10-messages.html
+++ b/doc/guide/bind10-messages.html
@@ -1,10 +1,10 @@
-<html><head><meta http-equiv="Content-Type" content="text/html; charset=ISO-8859-1"><title>BIND 10 Messages Manual</title><link rel="stylesheet" href="./bind10-guide.css" type="text/css"><meta name="generator" content="DocBook XSL Stylesheets V1.75.2"><meta name="description" content="BIND 10 is a Domain Name System (DNS) suite managed by Internet Systems Consortium (ISC). It includes DNS libraries and modular components for controlling authoritative and recursive DNS servers. This is the messages manual for BIND 10 version 20110519. The most up-to-date version of this document, along with other documents for BIND 10, can be found at ."></head><body bgcolor="white" text="black" link="#0000FF" vlink="#840084" alink="#0000FF"><div class="book" title="BIND 10 Messages Manual"><div class="titlepage"><div><div><h1 class="title"><a name="id1168230298903"></a>BIND 10 Messages Manual</h1></div><div><p class="releaseinfo">This is the messages manual for BIND 10 version
- 20110519.</p></div><div><p class="copyright">Copyright © 2011 Internet Systems Consortium, Inc.</p></div><div><div class="abstract" title="Abstract"><p class="title"><b>Abstract</b></p><p>BIND 10 is a Domain Name System (DNS) suite managed by
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=ISO-8859-1"><title>BIND 10 Messages Manual</title><link rel="stylesheet" href="./bind10-guide.css" type="text/css"><meta name="generator" content="DocBook XSL Stylesheets V1.75.2"><meta name="description" content="BIND 10 is a Domain Name System (DNS) suite managed by Internet Systems Consortium (ISC). It includes DNS libraries and modular components for controlling authoritative and recursive DNS servers. This is the messages manual for BIND 10 version 20110809. The most up-to-date version of this document, along with other documents for BIND 10, can be found at ."></head><body bgcolor="white" text="black" link="#0000FF" vlink="#840084" alink="#0000FF"><div class="book" title="BIND 10 Messages Manual"><div class="titlepage"><div><div><h1 class="title"><a name="id1168229460045"></a>BIND 10 Messages Manual</h1></div><div><p class="releaseinfo">This is the messages manual for BIND 10 version
+ 20110809.</p></div><div><p class="copyright">Copyright © 2011 Internet Systems Consortium, Inc.</p></div><div><div class="abstract" title="Abstract"><p class="title"><b>Abstract</b></p><p>BIND 10 is a Domain Name System (DNS) suite managed by
Internet Systems Consortium (ISC). It includes DNS libraries
and modular components for controlling authoritative and
recursive DNS servers.
</p><p>
- This is the messages manual for BIND 10 version 20110519.
+ This is the messages manual for BIND 10 version 20110809.
The most up-to-date version of this document, along with
other documents for BIND 10, can be found at
<a class="ulink" href="http://bind10.isc.org/docs" target="_top">http://bind10.isc.org/docs</a>.
@@ -26,38 +26,635 @@
For information on configuring and using BIND 10 logging,
refer to the <a class="ulink" href="bind10-guide.html" target="_top">BIND 10 Guide</a>.
</p></div><div class="chapter" title="Chapter 2. BIND 10 Messages"><div class="titlepage"><div><div><h2 class="title"><a name="messages"></a>Chapter 2. BIND 10 Messages</h2></div></div></div><p>
- </p><div class="variablelist"><dl><dt><a name="ASIODNS_FETCHCOMP"></a><span class="term">ASIODNS_FETCHCOMP upstream fetch to %1(%2) has now completed</span></dt><dd><p>
-A debug message, this records the the upstream fetch (a query made by the
+ </p><div class="variablelist"><dl><dt><a name="ASIODNS_FETCH_COMPLETED"></a><span class="term">ASIODNS_FETCH_COMPLETED upstream fetch to %1(%2) has now completed</span></dt><dd><p>
+A debug message, this records that the upstream fetch (a query made by the
resolver on behalf of its client) to the specified address has completed.
-</p></dd><dt><a name="ASIODNS_FETCHSTOP"></a><span class="term">ASIODNS_FETCHSTOP upstream fetch to %1(%2) has been stopped</span></dt><dd><p>
+</p></dd><dt><a name="ASIODNS_FETCH_STOPPED"></a><span class="term">ASIODNS_FETCH_STOPPED upstream fetch to %1(%2) has been stopped</span></dt><dd><p>
An external component has requested the halting of an upstream fetch. This
is an allowed operation, and the message should only appear if debug is
enabled.
-</p></dd><dt><a name="ASIODNS_OPENSOCK"></a><span class="term">ASIODNS_OPENSOCK error %1 opening %2 socket to %3(%4)</span></dt><dd><p>
+</p></dd><dt><a name="ASIODNS_OPEN_SOCKET"></a><span class="term">ASIODNS_OPEN_SOCKET error %1 opening %2 socket to %3(%4)</span></dt><dd><p>
The asynchronous I/O code encountered an error when trying to open a socket
of the specified protocol in order to send a message to the target address.
-The the number of the system error that cause the problem is given in the
+The number of the system error that caused the problem is given in the
message.
-</p></dd><dt><a name="ASIODNS_RECVSOCK"></a><span class="term">ASIODNS_RECVSOCK error %1 reading %2 data from %3(%4)</span></dt><dd><p>
-The asynchronous I/O code encountered an error when trying read data from
-the specified address on the given protocol. The the number of the system
-error that cause the problem is given in the message.
-</p></dd><dt><a name="ASIODNS_RECVTMO"></a><span class="term">ASIODNS_RECVTMO receive timeout while waiting for data from %1(%2)</span></dt><dd><p>
+</p></dd><dt><a name="ASIODNS_READ_DATA"></a><span class="term">ASIODNS_READ_DATA error %1 reading %2 data from %3(%4)</span></dt><dd><p>
+The asynchronous I/O code encountered an error when trying to read data from
+the specified address on the given protocol. The number of the system
+error that caused the problem is given in the message.
+</p></dd><dt><a name="ASIODNS_READ_TIMEOUT"></a><span class="term">ASIODNS_READ_TIMEOUT receive timeout while waiting for data from %1(%2)</span></dt><dd><p>
An upstream fetch from the specified address timed out. This may happen for
any number of reasons and is most probably a problem at the remote server
or a problem on the network. The message will only appear if debug is
enabled.
-</p></dd><dt><a name="ASIODNS_SENDSOCK"></a><span class="term">ASIODNS_SENDSOCK error %1 sending data using %2 to %3(%4)</span></dt><dd><p>
-The asynchronous I/O code encountered an error when trying send data to
-the specified address on the given protocol. The the number of the system
-error that cause the problem is given in the message.
-</p></dd><dt><a name="ASIODNS_UNKORIGIN"></a><span class="term">ASIODNS_UNKORIGIN unknown origin for ASIO error code %1 (protocol: %2, address %3)</span></dt><dd><p>
-This message should not appear and indicates an internal error if it does.
-Please enter a bug report.
-</p></dd><dt><a name="ASIODNS_UNKRESULT"></a><span class="term">ASIODNS_UNKRESULT unknown result (%1) when IOFetch::stop() was executed for I/O to %2(%3)</span></dt><dd><p>
-The termination method of the resolver's upstream fetch class was called with
-an unknown result code (which is given in the message). This message should
-not appear and may indicate an internal error. Please enter a bug report.
+</p></dd><dt><a name="ASIODNS_SEND_DATA"></a><span class="term">ASIODNS_SEND_DATA error %1 sending data using %2 to %3(%4)</span></dt><dd><p>
+The asynchronous I/O code encountered an error when trying to send data to
+the specified address on the given protocol. The number of the system
+error that caused the problem is given in the message.
+</p></dd><dt><a name="ASIODNS_UNKNOWN_ORIGIN"></a><span class="term">ASIODNS_UNKNOWN_ORIGIN unknown origin for ASIO error code %1 (protocol: %2, address %3)</span></dt><dd><p>
+An internal consistency check on the origin of a message from the
+asynchronous I/O module failed. This may indicate an internal error;
+please submit a bug report.
+</p></dd><dt><a name="ASIODNS_UNKNOWN_RESULT"></a><span class="term">ASIODNS_UNKNOWN_RESULT unknown result (%1) when IOFetch::stop() was executed for I/O to %2(%3)</span></dt><dd><p>
+An internal error indicating that the termination method of the resolver's
+upstream fetch class was called with an unknown result code (which is
+given in the message). Please submit a bug report.
+</p></dd><dt><a name="AUTH_AXFR_ERROR"></a><span class="term">AUTH_AXFR_ERROR error handling AXFR request: %1</span></dt><dd><p>
+This is a debug message produced by the authoritative server when it
+has encountered an error processing an AXFR request. The message gives
+the reason for the error, and the server will return a SERVFAIL code to
+the sender.
+</p></dd><dt><a name="AUTH_AXFR_UDP"></a><span class="term">AUTH_AXFR_UDP AXFR query received over UDP</span></dt><dd><p>
+This is a debug message output when the authoritative server has received
+an AXFR query over UDP. Use of UDP for AXFRs is not permitted by the
+protocol, so the server will return a FORMERR error to the sender.
+</p></dd><dt><a name="AUTH_COMMAND_FAILED"></a><span class="term">AUTH_COMMAND_FAILED execution of command channel instruction '%1' failed: %2</span></dt><dd><p>
+Execution of the specified command by the authoritative server failed. The
+message contains the reason for the failure.
+</p></dd><dt><a name="AUTH_CONFIG_CHANNEL_CREATED"></a><span class="term">AUTH_CONFIG_CHANNEL_CREATED configuration session channel created</span></dt><dd><p>
+This is a debug message indicating that the authoritative server has created
+the channel to the configuration manager. It is issued during server
+startup and is an indication that the initialization is proceeding normally.
+</p></dd><dt><a name="AUTH_CONFIG_CHANNEL_ESTABLISHED"></a><span class="term">AUTH_CONFIG_CHANNEL_ESTABLISHED configuration session channel established</span></dt><dd><p>
+This is a debug message indicating that the authoritative server
+has established communication with the configuration manager over the
+previously-created channel. It is issued during server startup and is an
+indication that the initialization is proceeding normally.
+</p></dd><dt><a name="AUTH_CONFIG_CHANNEL_STARTED"></a><span class="term">AUTH_CONFIG_CHANNEL_STARTED configuration session channel started</span></dt><dd><p>
+This is a debug message, issued when the authoritative server has
+posted a request to be notified when new configuration information is
+available. It is issued during server startup and is an indication that
+the initialization is proceeding normally.
+</p></dd><dt><a name="AUTH_CONFIG_LOAD_FAIL"></a><span class="term">AUTH_CONFIG_LOAD_FAIL load of configuration failed: %1</span></dt><dd><p>
+An attempt to configure the server with information from the configuration
+database during the startup sequence has failed. (The reason for
+the failure is given in the message.) The server will continue its
+initialization although it may not be configured in the desired way.
+</p></dd><dt><a name="AUTH_CONFIG_UPDATE_FAIL"></a><span class="term">AUTH_CONFIG_UPDATE_FAIL update of configuration failed: %1</span></dt><dd><p>
+An attempt to update the configuration of the server with information
+from the configuration database has failed, the reason being given in
+the message.
+</p></dd><dt><a name="AUTH_DATA_SOURCE"></a><span class="term">AUTH_DATA_SOURCE data source database file: %1</span></dt><dd><p>
+This is a debug message produced by the authoritative server when it accesses a
+database data source, listing the file that is being accessed.
+</p></dd><dt><a name="AUTH_DNS_SERVICES_CREATED"></a><span class="term">AUTH_DNS_SERVICES_CREATED DNS services created</span></dt><dd><p>
+This is a debug message indicating that the component that will handle
+incoming queries for the authoritative server (DNSServices) has been
+successfully created. It is issued during server startup and is an indication
+that the initialization is proceeding normally.
+</p></dd><dt><a name="AUTH_HEADER_PARSE_FAIL"></a><span class="term">AUTH_HEADER_PARSE_FAIL unable to parse header in received DNS packet: %1</span></dt><dd><p>
+This is a debug message, generated by the authoritative server when an
+attempt to parse the header of a received DNS packet has failed. (The
+reason for the failure is given in the message.) The server will drop the
+packet.
+</p></dd><dt><a name="AUTH_LOAD_TSIG"></a><span class="term">AUTH_LOAD_TSIG loading TSIG keys</span></dt><dd><p>
+This is a debug message indicating that the authoritative server
+has requested the keyring holding TSIG keys from the configuration
+database. It is issued during server startup and is an indication that the
+initialization is proceeding normally.
+</p></dd><dt><a name="AUTH_LOAD_ZONE"></a><span class="term">AUTH_LOAD_ZONE loaded zone %1/%2</span></dt><dd><p>
+This debug message is issued during the processing of the 'loadzone' command
+when the authoritative server has successfully loaded the named zone of the
+named class.
+</p></dd><dt><a name="AUTH_MEM_DATASRC_DISABLED"></a><span class="term">AUTH_MEM_DATASRC_DISABLED memory data source is disabled for class %1</span></dt><dd><p>
+This is a debug message reporting that the authoritative server has
+discovered that the memory data source is disabled for the given class.
+</p></dd><dt><a name="AUTH_MEM_DATASRC_ENABLED"></a><span class="term">AUTH_MEM_DATASRC_ENABLED memory data source is enabled for class %1</span></dt><dd><p>
+This is a debug message reporting that the authoritative server has
+discovered that the memory data source is enabled for the given class.
+</p></dd><dt><a name="AUTH_NOTIFY_QUESTIONS"></a><span class="term">AUTH_NOTIFY_QUESTIONS invalid number of questions (%1) in incoming NOTIFY</span></dt><dd><p>
+This debug message is logged by the authoritative server when it receives
+a NOTIFY packet that contains zero or more than one question. (A valid
+NOTIFY packet contains one question.) The server will return a FORMERR
+error to the sender.
+</p></dd><dt><a name="AUTH_NOTIFY_RRTYPE"></a><span class="term">AUTH_NOTIFY_RRTYPE invalid question RR type (%1) in incoming NOTIFY</span></dt><dd><p>
+This debug message is logged by the authoritative server when it receives
+a NOTIFY packet that has an RR type of something other than SOA in the
+question section. (The RR type received is included in the message.) The
+server will return a FORMERR error to the sender.
+</p></dd><dt><a name="AUTH_NO_STATS_SESSION"></a><span class="term">AUTH_NO_STATS_SESSION session interface for statistics is not available</span></dt><dd><p>
+The authoritative server had no session with the statistics module at the
+time it attempted to send it data: the attempt has been abandoned. This
+could be an error in configuration.
+</p></dd><dt><a name="AUTH_NO_XFRIN"></a><span class="term">AUTH_NO_XFRIN received NOTIFY but XFRIN session is not running</span></dt><dd><p>
+This is a debug message produced by the authoritative server when it receives
+a NOTIFY packet but the XFRIN process is not running. The packet will be
+dropped and nothing returned to the sender.
+</p></dd><dt><a name="AUTH_PACKET_PARSE_ERROR"></a><span class="term">AUTH_PACKET_PARSE_ERROR unable to parse received DNS packet: %1</span></dt><dd><p>
+This is a debug message, generated by the authoritative server when an
+attempt to parse a received DNS packet has failed due to something other
+than a protocol error. The reason for the failure is given in the message;
+the server will return a SERVFAIL error code to the sender.
+</p></dd><dt><a name="AUTH_PACKET_PROTOCOL_ERROR"></a><span class="term">AUTH_PACKET_PROTOCOL_ERROR DNS packet protocol error: %1. Returning %2</span></dt><dd><p>
+This is a debug message, generated by the authoritative server when an
+attempt to parse a received DNS packet has failed due to a protocol error.
+The reason for the failure is given in the message, as is the error code
+that will be returned to the sender.
+</p></dd><dt><a name="AUTH_PACKET_RECEIVED"></a><span class="term">AUTH_PACKET_RECEIVED message received:\n%1</span></dt><dd><p>
+This is a debug message output by the authoritative server when it
+receives a valid DNS packet.
+</p><p>
+Note: This message includes the packet received, rendered in the form of
+multiple lines of text. For this reason, it is suggested that this log message
+not be routed to the syslog file, where the multiple lines could confuse
+programs that expect a format of one message per line.
+</p></dd><dt><a name="AUTH_PROCESS_FAIL"></a><span class="term">AUTH_PROCESS_FAIL message processing failure: %1</span></dt><dd><p>
+This message is generated by the authoritative server when it has
+encountered an internal error whilst processing a received packet:
+the cause of the error is included in the message.
+</p><p>
+The server will return a SERVFAIL error code to the sender of the packet.
+This message indicates a potential error in the server. Please open a
+bug ticket for this issue.
+</p></dd><dt><a name="AUTH_RECEIVED_COMMAND"></a><span class="term">AUTH_RECEIVED_COMMAND command '%1' received</span></dt><dd><p>
+This is a debug message issued when the authoritative server has received
+a command on the command channel.
+</p></dd><dt><a name="AUTH_RECEIVED_SENDSTATS"></a><span class="term">AUTH_RECEIVED_SENDSTATS command 'sendstats' received</span></dt><dd><p>
+This is a debug message issued when the authoritative server has received
+a command from the statistics module to send it data. The 'sendstats'
+command is handled differently to other commands, which is why the debug
+message associated with it has its own code.
+</p></dd><dt><a name="AUTH_RESPONSE_RECEIVED"></a><span class="term">AUTH_RESPONSE_RECEIVED received response message, ignoring</span></dt><dd><p>
+This is a debug message that is output if the authoritative server
+receives a DNS packet with the QR bit set, i.e. a DNS response. The
+server ignores the packet as it only responds to question packets.
+</p></dd><dt><a name="AUTH_SEND_ERROR_RESPONSE"></a><span class="term">AUTH_SEND_ERROR_RESPONSE sending an error response (%1 bytes):\n%2</span></dt><dd><p>
+This is a debug message recording that the authoritative server is sending
+an error response to the originator of the query. A previous message will
+have recorded details of the failure.
+</p><p>
+Note: This message includes the packet sent, rendered in the form of
+multiple lines of text. For this reason, it is suggested that this log message
+not be routed to the syslog file, where the multiple lines could confuse
+programs that expect a format of one message per line.
+</p></dd><dt><a name="AUTH_SEND_NORMAL_RESPONSE"></a><span class="term">AUTH_SEND_NORMAL_RESPONSE sending an error response (%1 bytes):\n%2</span></dt><dd><p>
+This is a debug message recording that the authoritative server is sending
+a response to the originator of a query.
+</p><p>
+Note: This message includes the packet sent, rendered in the form of
+multiple lines of text. For this reason, it is suggested that this log message
+not be routed to the syslog file, where the multiple lines could confuse
+programs that expect a format of one message per line.
+</p></dd><dt><a name="AUTH_SERVER_CREATED"></a><span class="term">AUTH_SERVER_CREATED server created</span></dt><dd><p>
+An informational message indicating that the authoritative server process has
+been created and is initializing. The AUTH_SERVER_STARTED message will be
+output when initialization has successfully completed and the server starts
+accepting queries.
+</p></dd><dt><a name="AUTH_SERVER_FAILED"></a><span class="term">AUTH_SERVER_FAILED server failed: %1</span></dt><dd><p>
+The authoritative server has encountered a fatal error and is terminating. The
+reason for the failure is included in the message.
+</p></dd><dt><a name="AUTH_SERVER_STARTED"></a><span class="term">AUTH_SERVER_STARTED server started</span></dt><dd><p>
+Initialization of the authoritative server has completed successfully
+and it is entering the main loop, waiting for queries to arrive.
+</p></dd><dt><a name="AUTH_SQLITE3"></a><span class="term">AUTH_SQLITE3 nothing to do for loading sqlite3</span></dt><dd><p>
+This is a debug message indicating that the authoritative server has
+found that the data source it is loading is an SQLite3 data source,
+so no further validation is needed.
+</p></dd><dt><a name="AUTH_STATS_CHANNEL_CREATED"></a><span class="term">AUTH_STATS_CHANNEL_CREATED STATS session channel created</span></dt><dd><p>
+This is a debug message indicating that the authoritative server has
+created a channel to the statistics process. It is issued during server
+startup and is an indication that the initialization is proceeding normally.
+</p></dd><dt><a name="AUTH_STATS_CHANNEL_ESTABLISHED"></a><span class="term">AUTH_STATS_CHANNEL_ESTABLISHED STATS session channel established</span></dt><dd><p>
+This is a debug message indicating that the authoritative server
+has established communication over the previously created statistics
+channel. It is issued during server startup and is an indication that the
+initialization is proceeding normally.
+</p></dd><dt><a name="AUTH_STATS_COMMS"></a><span class="term">AUTH_STATS_COMMS communication error in sending statistics data: %1</span></dt><dd><p>
+An error was encountered when the authoritative server tried to send data
+to the statistics daemon. The message includes additional information
+describing the reason for the failure.
+</p></dd><dt><a name="AUTH_STATS_TIMEOUT"></a><span class="term">AUTH_STATS_TIMEOUT timeout while sending statistics data: %1</span></dt><dd><p>
+The authoritative server sent data to the statistics daemon but received
+no acknowledgement within the specified time. The message includes
+additional information describing the reason for the failure.
+</p></dd><dt><a name="AUTH_STATS_TIMER_DISABLED"></a><span class="term">AUTH_STATS_TIMER_DISABLED statistics timer has been disabled</span></dt><dd><p>
+This is a debug message indicating that the statistics timer has been
+disabled in the authoritative server and no statistics information is
+being produced.
+</p></dd><dt><a name="AUTH_STATS_TIMER_SET"></a><span class="term">AUTH_STATS_TIMER_SET statistics timer set to %1 second(s)</span></dt><dd><p>
+This is a debug message indicating that the statistics timer has been
+enabled and that the authoritative server will produce statistics data
+at the specified interval.
+</p></dd><dt><a name="AUTH_UNSUPPORTED_OPCODE"></a><span class="term">AUTH_UNSUPPORTED_OPCODE unsupported opcode: %1</span></dt><dd><p>
+This is a debug message, produced when a received DNS packet being
+processed by the authoritative server has been found to contain an
+unsupported opcode. (The opcode is included in the message.) The server
+will return an error code of NOTIMPL to the sender.
+</p></dd><dt><a name="AUTH_XFRIN_CHANNEL_CREATED"></a><span class="term">AUTH_XFRIN_CHANNEL_CREATED XFRIN session channel created</span></dt><dd><p>
+This is a debug message indicating that the authoritative server has
+created a channel to the XFRIN (Transfer-in) process. It is issued
+during server startup and is an indication that the initialization is
+proceeding normally.
+</p></dd><dt><a name="AUTH_XFRIN_CHANNEL_ESTABLISHED"></a><span class="term">AUTH_XFRIN_CHANNEL_ESTABLISHED XFRIN session channel established</span></dt><dd><p>
+This is a debug message indicating that the authoritative server has
+established communication over the previously-created channel to the
+XFRIN (Transfer-in) process. It is issued during server startup and is an
+indication that the initialization is proceeding normally.
+</p></dd><dt><a name="AUTH_ZONEMGR_COMMS"></a><span class="term">AUTH_ZONEMGR_COMMS error communicating with zone manager: %1</span></dt><dd><p>
+This is a debug message output during the processing of a NOTIFY request.
+An error (listed in the message) has been encountered whilst communicating
+with the zone manager. The NOTIFY request will not be honored.
+</p></dd><dt><a name="AUTH_ZONEMGR_ERROR"></a><span class="term">AUTH_ZONEMGR_ERROR received error response from zone manager: %1</span></dt><dd><p>
+This is a debug message output during the processing of a NOTIFY
+request. The zone manager component has been informed of the request,
+but has returned an error response (which is included in the message). The
+NOTIFY request will not be honored.
+</p></dd><dt><a name="BIND10_CHECK_MSGQ_ALREADY_RUNNING"></a><span class="term">BIND10_CHECK_MSGQ_ALREADY_RUNNING checking if msgq is already running</span></dt><dd><p>
+The boss process is starting up and will now check if the message bus
+daemon is already running. If so, it will not be able to start, as it
+needs a dedicated message bus.
+</p></dd><dt><a name="BIND10_CONFIGURATION_START_AUTH"></a><span class="term">BIND10_CONFIGURATION_START_AUTH start authoritative server: %1</span></dt><dd><p>
+This message shows whether or not the authoritative server should be
+started according to the configuration.
+</p></dd><dt><a name="BIND10_CONFIGURATION_START_RESOLVER"></a><span class="term">BIND10_CONFIGURATION_START_RESOLVER start resolver: %1</span></dt><dd><p>
+This message shows whether or not the resolver should be
+started according to the configuration.
+</p></dd><dt><a name="BIND10_INVALID_USER"></a><span class="term">BIND10_INVALID_USER invalid user: %1</span></dt><dd><p>
+The boss process was started with the -u option, to drop root privileges
+and continue running as the specified user, but the user is unknown.
+</p></dd><dt><a name="BIND10_KILLING_ALL_PROCESSES"></a><span class="term">BIND10_KILLING_ALL_PROCESSES killing all started processes</span></dt><dd><p>
+The boss module was not able to start every process it needed to start
+during startup, and will now kill the processes that did get started.
+</p></dd><dt><a name="BIND10_KILL_PROCESS"></a><span class="term">BIND10_KILL_PROCESS killing process %1</span></dt><dd><p>
+The boss module is sending a kill signal to the process with the given name,
+as part of the process of killing all started processes during a failed
+startup, as described for BIND10_KILLING_ALL_PROCESSES.
+</p></dd><dt><a name="BIND10_MSGQ_ALREADY_RUNNING"></a><span class="term">BIND10_MSGQ_ALREADY_RUNNING msgq daemon already running, cannot start</span></dt><dd><p>
+There already appears to be a message bus daemon running. Either an
+old process was not shut down correctly, and needs to be killed, or
+another instance of BIND10, with the same msgq domain socket, is
+running, which needs to be stopped.
+</p></dd><dt><a name="BIND10_MSGQ_DAEMON_ENDED"></a><span class="term">BIND10_MSGQ_DAEMON_ENDED b10-msgq process died, shutting down</span></dt><dd><p>
+The message bus daemon has died. This is a fatal error, since it may
+leave the system in an inconsistent state. BIND10 will now shut down.
+</p></dd><dt><a name="BIND10_MSGQ_DISAPPEARED"></a><span class="term">BIND10_MSGQ_DISAPPEARED msgq channel disappeared</span></dt><dd><p>
+While listening on the message bus channel for messages, it suddenly
+disappeared. The msgq daemon may have died. This might lead to an
+inconsistent state of the system, and BIND 10 will now shut down.
+</p></dd><dt><a name="BIND10_PROCESS_ENDED_NO_EXIT_STATUS"></a><span class="term">BIND10_PROCESS_ENDED_NO_EXIT_STATUS process %1 (PID %2) died: exit status not available</span></dt><dd><p>
+The given process ended unexpectedly, but no exit status is
+available. See BIND10_PROCESS_ENDED_WITH_EXIT_STATUS for a longer
+description.
+</p></dd><dt><a name="BIND10_PROCESS_ENDED_WITH_EXIT_STATUS"></a><span class="term">BIND10_PROCESS_ENDED_WITH_EXIT_STATUS process %1 (PID %2) terminated, exit status = %3</span></dt><dd><p>
+The given process ended unexpectedly with the given exit status.
+Depending on which module it was, it may simply be restarted, or it
+may be a problem that will cause the boss module to shut down too.
+The latter happens if it was the message bus daemon, which, if it has
+died suddenly, may leave the system in an inconsistent state. BIND10
+will also shut down now if it has been run with --brittle.
+</p></dd><dt><a name="BIND10_READING_BOSS_CONFIGURATION"></a><span class="term">BIND10_READING_BOSS_CONFIGURATION reading boss configuration</span></dt><dd><p>
+The boss process is starting up, and will now process the initial
+configuration, as received from the configuration manager.
+</p></dd><dt><a name="BIND10_RECEIVED_COMMAND"></a><span class="term">BIND10_RECEIVED_COMMAND received command: %1</span></dt><dd><p>
+The boss module received a command and shall now process it. The command
+is printed.
+</p></dd><dt><a name="BIND10_RECEIVED_NEW_CONFIGURATION"></a><span class="term">BIND10_RECEIVED_NEW_CONFIGURATION received new configuration: %1</span></dt><dd><p>
+The boss module received a configuration update and is going to apply
+it now. The new configuration is printed.
+</p></dd><dt><a name="BIND10_RECEIVED_SIGNAL"></a><span class="term">BIND10_RECEIVED_SIGNAL received signal %1</span></dt><dd><p>
+The boss module received the given signal.
+</p></dd><dt><a name="BIND10_RESURRECTED_PROCESS"></a><span class="term">BIND10_RESURRECTED_PROCESS resurrected %1 (PID %2)</span></dt><dd><p>
+The given process has been restarted successfully, and is now running
+with the given process id.
+</p></dd><dt><a name="BIND10_RESURRECTING_PROCESS"></a><span class="term">BIND10_RESURRECTING_PROCESS resurrecting dead %1 process...</span></dt><dd><p>
+The given process has ended unexpectedly, and is now being restarted.
+</p></dd><dt><a name="BIND10_SELECT_ERROR"></a><span class="term">BIND10_SELECT_ERROR error in select() call: %1</span></dt><dd><p>
+There was a fatal error in the call to select(), used to see if a child
+process has ended or if there is a message on the message bus. This
+should not happen under normal circumstances and is considered fatal,
+so BIND 10 will now shut down. The specific error is printed.
+</p></dd><dt><a name="BIND10_SEND_SIGKILL"></a><span class="term">BIND10_SEND_SIGKILL sending SIGKILL to %1 (PID %2)</span></dt><dd><p>
+The boss module is sending a SIGKILL signal to the given process.
+</p></dd><dt><a name="BIND10_SEND_SIGTERM"></a><span class="term">BIND10_SEND_SIGTERM sending SIGTERM to %1 (PID %2)</span></dt><dd><p>
+The boss module is sending a SIGTERM signal to the given process.
+</p></dd><dt><a name="BIND10_SHUTDOWN"></a><span class="term">BIND10_SHUTDOWN stopping the server</span></dt><dd><p>
+The boss process received a command or signal telling it to shut down.
+It will send a shutdown command to each process. The processes that do
+not shut down will then receive a SIGTERM signal. If that doesn't work,
+it shall send SIGKILL signals to the processes still alive.
+</p></dd><dt><a name="BIND10_SHUTDOWN_COMPLETE"></a><span class="term">BIND10_SHUTDOWN_COMPLETE all processes ended, shutdown complete</span></dt><dd><p>
+All child processes have been stopped, and the boss process will now
+stop itself.
+</p></dd><dt><a name="BIND10_SOCKCREATOR_BAD_CAUSE"></a><span class="term">BIND10_SOCKCREATOR_BAD_CAUSE unknown error cause from socket creator: %1</span></dt><dd><p>
+The socket creator reported an error when creating a socket. But the function
+which failed is unknown (not one of 'S' for socket or 'B' for bind).
+</p></dd><dt><a name="BIND10_SOCKCREATOR_BAD_RESPONSE"></a><span class="term">BIND10_SOCKCREATOR_BAD_RESPONSE unknown response for socket request: %1</span></dt><dd><p>
+The boss requested a socket from the creator, but the answer is unknown. This
+looks like a programmer error.
+</p></dd><dt><a name="BIND10_SOCKCREATOR_CRASHED"></a><span class="term">BIND10_SOCKCREATOR_CRASHED the socket creator crashed</span></dt><dd><p>
+The socket creator terminated unexpectedly. It is not possible to restart it
+(because the boss already gave up root privileges), so the system is going
+to terminate.
+</p></dd><dt><a name="BIND10_SOCKCREATOR_EOF"></a><span class="term">BIND10_SOCKCREATOR_EOF eof while expecting data from socket creator</span></dt><dd><p>
+There should be more data from the socket creator, but it closed the socket.
+It probably crashed.
+</p></dd><dt><a name="BIND10_SOCKCREATOR_INIT"></a><span class="term">BIND10_SOCKCREATOR_INIT initializing socket creator parser</span></dt><dd><p>
+The boss module initializes routines for parsing the socket creator
+protocol.
+</p></dd><dt><a name="BIND10_SOCKCREATOR_KILL"></a><span class="term">BIND10_SOCKCREATOR_KILL killing the socket creator</span></dt><dd><p>
+The socket creator is being terminated the aggressive way, by sending it
+SIGKILL. This should not usually happen.
+</p></dd><dt><a name="BIND10_SOCKCREATOR_TERMINATE"></a><span class="term">BIND10_SOCKCREATOR_TERMINATE terminating socket creator</span></dt><dd><p>
+The boss module sends a request to terminate to the socket creator.
+</p></dd><dt><a name="BIND10_SOCKCREATOR_TRANSPORT_ERROR"></a><span class="term">BIND10_SOCKCREATOR_TRANSPORT_ERROR transport error when talking to the socket creator: %1</span></dt><dd><p>
+Either sending or receiving data from the socket creator failed with the given
+error. The creator probably crashed or some serious OS-level problem happened,
+as the communication happens only on the local host.
+</p></dd><dt><a name="BIND10_SOCKET_CREATED"></a><span class="term">BIND10_SOCKET_CREATED successfully created socket %1</span></dt><dd><p>
+The socket creator successfully created and sent a requested socket; it has
+the given file number.
+</p></dd><dt><a name="BIND10_SOCKET_ERROR"></a><span class="term">BIND10_SOCKET_ERROR error on %1 call in the creator: %2/%3</span></dt><dd><p>
+The socket creator failed to create the requested socket. It failed on the
+indicated OS API function with the given error.
+</p></dd><dt><a name="BIND10_SOCKET_GET"></a><span class="term">BIND10_SOCKET_GET requesting socket [%1]:%2 of type %3 from the creator</span></dt><dd><p>
+The boss forwards a request for a socket to the socket creator.
+</p></dd><dt><a name="BIND10_STARTED_PROCESS"></a><span class="term">BIND10_STARTED_PROCESS started %1</span></dt><dd><p>
+The given process has successfully been started.
+</p></dd><dt><a name="BIND10_STARTED_PROCESS_PID"></a><span class="term">BIND10_STARTED_PROCESS_PID started %1 (PID %2)</span></dt><dd><p>
+The given process has successfully been started, and has the given PID.
+</p></dd><dt><a name="BIND10_STARTING"></a><span class="term">BIND10_STARTING starting BIND10: %1</span></dt><dd><p>
+Informational message on startup that shows the full version.
+</p></dd><dt><a name="BIND10_STARTING_PROCESS"></a><span class="term">BIND10_STARTING_PROCESS starting process %1</span></dt><dd><p>
+The boss module is starting the given process.
+</p></dd><dt><a name="BIND10_STARTING_PROCESS_PORT"></a><span class="term">BIND10_STARTING_PROCESS_PORT starting process %1 (to listen on port %2)</span></dt><dd><p>
+The boss module is starting the given process, which will listen on the
+given port number.
+</p></dd><dt><a name="BIND10_STARTING_PROCESS_PORT_ADDRESS"></a><span class="term">BIND10_STARTING_PROCESS_PORT_ADDRESS starting process %1 (to listen on %2#%3)</span></dt><dd><p>
+The boss module is starting the given process, which will listen on the
+given address and port number (written as <address>#<port>).
+</p></dd><dt><a name="BIND10_STARTUP_COMPLETE"></a><span class="term">BIND10_STARTUP_COMPLETE BIND 10 started</span></dt><dd><p>
+All modules have been successfully started, and BIND 10 is now running.
+</p></dd><dt><a name="BIND10_STARTUP_ERROR"></a><span class="term">BIND10_STARTUP_ERROR error during startup: %1</span></dt><dd><p>
+There was a fatal error when BIND10 was trying to start. The error is
+shown, and BIND10 will now shut down.
+</p></dd><dt><a name="BIND10_START_AS_NON_ROOT"></a><span class="term">BIND10_START_AS_NON_ROOT starting %1 as a user, not root. This might fail.</span></dt><dd><p>
+The given module is being started or restarted without root privileges.
+If the module needs these privileges, it may have problems starting.
+Note that this issue should be resolved by the pending 'socket-creator'
+process; once that has been implemented, modules should not need root
+privileges anymore. See tickets #800 and #801 for more information.
+</p></dd><dt><a name="BIND10_STOP_PROCESS"></a><span class="term">BIND10_STOP_PROCESS asking %1 to shut down</span></dt><dd><p>
+The boss module is sending a shutdown command to the given module over
+the message channel.
+</p></dd><dt><a name="BIND10_UNKNOWN_CHILD_PROCESS_ENDED"></a><span class="term">BIND10_UNKNOWN_CHILD_PROCESS_ENDED unknown child pid %1 exited</span></dt><dd><p>
+An unknown child process has exited. The PID is printed, but no further
+action will be taken by the boss process.
+</p></dd><dt><a name="CACHE_ENTRY_MISSING_RRSET"></a><span class="term">CACHE_ENTRY_MISSING_RRSET missing RRset to generate message for %1</span></dt><dd><p>
+The cache tried to generate the complete answer message. It knows the structure
+of the message, but some of the RRsets to be put there are not in cache (they
+probably expired already). Therefore it pretends the message was not found.
+</p></dd><dt><a name="CACHE_LOCALZONE_FOUND"></a><span class="term">CACHE_LOCALZONE_FOUND found entry with key %1 in local zone data</span></dt><dd><p>
+Debug message, noting that the requested data was successfully found in the
+local zone data of the cache.
+</p></dd><dt><a name="CACHE_LOCALZONE_UNKNOWN"></a><span class="term">CACHE_LOCALZONE_UNKNOWN entry with key %1 not found in local zone data</span></dt><dd><p>
+Debug message. The requested data was not found in the local zone data.
+</p></dd><dt><a name="CACHE_LOCALZONE_UPDATE"></a><span class="term">CACHE_LOCALZONE_UPDATE updating local zone element at key %1</span></dt><dd><p>
+Debug message issued when there's update to the local zone section of cache.
+</p></dd><dt><a name="CACHE_MESSAGES_DEINIT"></a><span class="term">CACHE_MESSAGES_DEINIT deinitialized message cache</span></dt><dd><p>
+Debug message. It is issued when the server deinitializes the message cache.
+</p></dd><dt><a name="CACHE_MESSAGES_EXPIRED"></a><span class="term">CACHE_MESSAGES_EXPIRED found an expired message entry for %1 in the message cache</span></dt><dd><p>
+Debug message. The requested data was found in the message cache, but it
+already expired. Therefore the cache removes the entry and pretends it found
+nothing.
+</p></dd><dt><a name="CACHE_MESSAGES_FOUND"></a><span class="term">CACHE_MESSAGES_FOUND found a message entry for %1 in the message cache</span></dt><dd><p>
+Debug message. We found the whole message in the cache, so it can be returned
+to the user without any other lookups.
+</p></dd><dt><a name="CACHE_MESSAGES_INIT"></a><span class="term">CACHE_MESSAGES_INIT initialized message cache for %1 messages of class %2</span></dt><dd><p>
+Debug message issued when a new message cache is created. It lists the class
+of messages it can hold and the maximum size of the cache.
+</p></dd><dt><a name="CACHE_MESSAGES_REMOVE"></a><span class="term">CACHE_MESSAGES_REMOVE removing old instance of %1/%2/%3 first</span></dt><dd><p>
+Debug message. This may follow CACHE_MESSAGES_UPDATE and indicates that, while
+updating, the old instance is being removed prior to inserting a new one.
+</p></dd><dt><a name="CACHE_MESSAGES_UNCACHEABLE"></a><span class="term">CACHE_MESSAGES_UNCACHEABLE not inserting uncacheable message %1/%2/%3</span></dt><dd><p>
+Debug message, noting that the given message can not be cached. This is because
+there's no SOA record in the message. See RFC 2308 section 5 for more
+information.
+</p></dd><dt><a name="CACHE_MESSAGES_UNKNOWN"></a><span class="term">CACHE_MESSAGES_UNKNOWN no entry for %1 found in the message cache</span></dt><dd><p>
+Debug message. The message cache didn't find any entry for the given key.
+</p></dd><dt><a name="CACHE_MESSAGES_UPDATE"></a><span class="term">CACHE_MESSAGES_UPDATE updating message entry %1/%2/%3</span></dt><dd><p>
+Debug message issued when the message cache is being updated with a new
+message. Either the old instance is removed or, if none is found, a new one
+is created.
+</p></dd><dt><a name="CACHE_RESOLVER_DEEPEST"></a><span class="term">CACHE_RESOLVER_DEEPEST looking up deepest NS for %1/%2</span></dt><dd><p>
+Debug message. The resolver cache is looking up the deepest known nameserver,
+so the resolution doesn't have to start from the root.
+</p></dd><dt><a name="CACHE_RESOLVER_INIT"></a><span class="term">CACHE_RESOLVER_INIT initializing resolver cache for class %1</span></dt><dd><p>
+Debug message. The resolver cache is being created for this given class.
+</p></dd><dt><a name="CACHE_RESOLVER_INIT_INFO"></a><span class="term">CACHE_RESOLVER_INIT_INFO initializing resolver cache for class %1</span></dt><dd><p>
+Debug message, the resolver cache is being created for this given class. The
+difference from CACHE_RESOLVER_INIT is only in the format of the passed
+information; otherwise it does the same.
+</p></dd><dt><a name="CACHE_RESOLVER_LOCAL_MSG"></a><span class="term">CACHE_RESOLVER_LOCAL_MSG message for %1/%2 found in local zone data</span></dt><dd><p>
+Debug message. The resolver cache found a complete message for the user query
+in the zone data.
+</p></dd><dt><a name="CACHE_RESOLVER_LOCAL_RRSET"></a><span class="term">CACHE_RESOLVER_LOCAL_RRSET RRset for %1/%2 found in local zone data</span></dt><dd><p>
+Debug message. The resolver cache found a requested RRset in the local zone
+data.
+</p></dd><dt><a name="CACHE_RESOLVER_LOOKUP_MSG"></a><span class="term">CACHE_RESOLVER_LOOKUP_MSG looking up message in resolver cache for %1/%2</span></dt><dd><p>
+Debug message. The resolver cache is trying to find a message to answer the
+user query.
+</p></dd><dt><a name="CACHE_RESOLVER_LOOKUP_RRSET"></a><span class="term">CACHE_RESOLVER_LOOKUP_RRSET looking up RRset in resolver cache for %1/%2</span></dt><dd><p>
+Debug message. The resolver cache is trying to find an RRset (which usually
+originates internally from the resolver).
+</p></dd><dt><a name="CACHE_RESOLVER_NO_QUESTION"></a><span class="term">CACHE_RESOLVER_NO_QUESTION answer message for %1/%2 has empty question section</span></dt><dd><p>
+The cache tried to fill in found data into the response message. But it
+discovered the message contains no question section, which is invalid.
+This is likely a programmer error, please submit a bug report.
+</p></dd><dt><a name="CACHE_RESOLVER_UNKNOWN_CLASS_MSG"></a><span class="term">CACHE_RESOLVER_UNKNOWN_CLASS_MSG no cache for class %1</span></dt><dd><p>
+Debug message. While trying to look up a message in the resolver cache, it was
+discovered there's no cache for this class at all. Therefore no message is
+found.
+</p></dd><dt><a name="CACHE_RESOLVER_UNKNOWN_CLASS_RRSET"></a><span class="term">CACHE_RESOLVER_UNKNOWN_CLASS_RRSET no cache for class %1</span></dt><dd><p>
+Debug message. While trying to look up an RRset in the resolver cache, it was
+discovered there's no cache for this class at all. Therefore no data is found.
+</p></dd><dt><a name="CACHE_RESOLVER_UPDATE_MSG"></a><span class="term">CACHE_RESOLVER_UPDATE_MSG updating message for %1/%2/%3</span></dt><dd><p>
+Debug message. The resolver is updating a message in the cache.
+</p></dd><dt><a name="CACHE_RESOLVER_UPDATE_RRSET"></a><span class="term">CACHE_RESOLVER_UPDATE_RRSET updating RRset for %1/%2/%3</span></dt><dd><p>
+Debug message. The resolver is updating an RRset in the cache.
+</p></dd><dt><a name="CACHE_RESOLVER_UPDATE_UNKNOWN_CLASS_MSG"></a><span class="term">CACHE_RESOLVER_UPDATE_UNKNOWN_CLASS_MSG no cache for class %1</span></dt><dd><p>
+Debug message. While trying to insert a message into the cache, it was
+discovered that there's no cache for the class of message. Therefore
+the message will not be cached.
+</p></dd><dt><a name="CACHE_RESOLVER_UPDATE_UNKNOWN_CLASS_RRSET"></a><span class="term">CACHE_RESOLVER_UPDATE_UNKNOWN_CLASS_RRSET no cache for class %1</span></dt><dd><p>
+Debug message. While trying to insert an RRset into the cache, it was
+discovered that there's no cache for the class of the RRset. Therefore
+the RRset will not be cached.
+</p></dd><dt><a name="CACHE_RRSET_EXPIRED"></a><span class="term">CACHE_RRSET_EXPIRED found expired RRset %1/%2/%3</span></dt><dd><p>
+Debug message. The requested data was found in the RRset cache. However, it is
+expired, so the cache removed it and is going to pretend nothing was found.
+</p></dd><dt><a name="CACHE_RRSET_INIT"></a><span class="term">CACHE_RRSET_INIT initializing RRset cache for %1 RRsets of class %2</span></dt><dd><p>
+Debug message. The RRset cache to hold at most this many RRsets for the given
+class is being created.
+</p></dd><dt><a name="CACHE_RRSET_LOOKUP"></a><span class="term">CACHE_RRSET_LOOKUP looking up %1/%2/%3 in RRset cache</span></dt><dd><p>
+Debug message. The resolver is trying to look up data in the RRset cache.
+</p></dd><dt><a name="CACHE_RRSET_NOT_FOUND"></a><span class="term">CACHE_RRSET_NOT_FOUND no RRset found for %1/%2/%3</span></dt><dd><p>
+Debug message which can follow CACHE_RRSET_LOOKUP. This means the data is not
+in the cache.
+</p></dd><dt><a name="CACHE_RRSET_REMOVE_OLD"></a><span class="term">CACHE_RRSET_REMOVE_OLD removing old RRset for %1/%2/%3 to make space for new one</span></dt><dd><p>
+Debug message which can follow CACHE_RRSET_UPDATE. During the update, the cache
+removed an old instance of the RRset to replace it with the new one.
+</p></dd><dt><a name="CACHE_RRSET_UNTRUSTED"></a><span class="term">CACHE_RRSET_UNTRUSTED not replacing old RRset for %1/%2/%3, it has higher trust level</span></dt><dd><p>
+Debug message which can follow CACHE_RRSET_UPDATE. The cache already holds the
+same RRset, but from a more trusted source, so the old one is kept and the new one
+ignored.
+</p></dd><dt><a name="CACHE_RRSET_UPDATE"></a><span class="term">CACHE_RRSET_UPDATE updating RRset %1/%2/%3 in the cache</span></dt><dd><p>
+Debug message. The RRset cache is updating its data with the given RRset.
+</p></dd><dt><a name="CC_ASYNC_READ_FAILED"></a><span class="term">CC_ASYNC_READ_FAILED asynchronous read failed</span></dt><dd><p>
+This marks a low-level error; we tried to read data from the message queue
+daemon asynchronously, but the ASIO library returned an error.
+</p></dd><dt><a name="CC_CONN_ERROR"></a><span class="term">CC_CONN_ERROR error connecting to message queue (%1)</span></dt><dd><p>
+It is impossible to reach the message queue daemon for the reason given. It
+is unlikely that the program reporting this error will be able to
+continue running, as communication with the rest of BIND 10 is vital
+for the components.
+</p></dd><dt><a name="CC_DISCONNECT"></a><span class="term">CC_DISCONNECT disconnecting from message queue daemon</span></dt><dd><p>
+The library is disconnecting from the message queue daemon. This debug message
+indicates that the program is trying to shut down gracefully.
+</p></dd><dt><a name="CC_ESTABLISH"></a><span class="term">CC_ESTABLISH trying to establish connection with message queue daemon at %1</span></dt><dd><p>
+This debug message indicates that the command channel library is about to
+connect to the message queue daemon, which should be listening on the UNIX-domain
+socket listed in the output.
+</p></dd><dt><a name="CC_ESTABLISHED"></a><span class="term">CC_ESTABLISHED successfully connected to message queue daemon</span></dt><dd><p>
+This debug message indicates that the connection was successfully made, this
+should follow CC_ESTABLISH.
+</p></dd><dt><a name="CC_GROUP_RECEIVE"></a><span class="term">CC_GROUP_RECEIVE trying to receive a message</span></dt><dd><p>
+Debug message, noting that a message is expected to come over the command
+channel.
+</p></dd><dt><a name="CC_GROUP_RECEIVED"></a><span class="term">CC_GROUP_RECEIVED message arrived ('%1', '%2')</span></dt><dd><p>
+Debug message, noting that we successfully received a message (its envelope and
+payload listed). This follows CC_GROUP_RECEIVE, but might happen some time
+later, depending on whether we waited for it or just polled.
+</p></dd><dt><a name="CC_GROUP_SEND"></a><span class="term">CC_GROUP_SEND sending message '%1' to group '%2'</span></dt><dd><p>
+Debug message, we're about to send a message over the command channel.
+</p></dd><dt><a name="CC_INVALID_LENGTHS"></a><span class="term">CC_INVALID_LENGTHS invalid length parameters (%1, %2)</span></dt><dd><p>
+This happens when garbage comes over the command channel or some kind of
+confusion happens in the program. The data received from the socket makes no
+sense if we interpret it as the lengths of a message. The first is the total
+length of the message; the second is the length of the header. The header
+and its length (2 bytes) are counted in the total length.
+</p></dd><dt><a name="CC_LENGTH_NOT_READY"></a><span class="term">CC_LENGTH_NOT_READY length not ready</span></dt><dd><p>
+There should be data representing the length of the message on the socket, but it
+is not there.
+</p></dd><dt><a name="CC_NO_MESSAGE"></a><span class="term">CC_NO_MESSAGE no message ready to be received yet</span></dt><dd><p>
+The program polled for incoming messages, but there was no message waiting.
+This is a debug message which may happen only after CC_GROUP_RECEIVE.
+</p></dd><dt><a name="CC_NO_MSGQ"></a><span class="term">CC_NO_MSGQ unable to connect to message queue (%1)</span></dt><dd><p>
+It isn't possible to connect to the message queue daemon, for the reason listed.
+It is unlikely any program will be able to continue without this communication.
+</p></dd><dt><a name="CC_READ_ERROR"></a><span class="term">CC_READ_ERROR error reading data from command channel (%1)</span></dt><dd><p>
+A low level error happened when the library tried to read data from the
+command channel socket. The reason is listed.
+</p></dd><dt><a name="CC_READ_EXCEPTION"></a><span class="term">CC_READ_EXCEPTION error reading data from command channel (%1)</span></dt><dd><p>
+We received an exception while trying to read data from the command
+channel socket. The reason is listed.
+</p></dd><dt><a name="CC_REPLY"></a><span class="term">CC_REPLY replying to message from '%1' with '%2'</span></dt><dd><p>
+Debug message, noting we're sending a response to the original message
+with the given envelope.
+</p></dd><dt><a name="CC_SET_TIMEOUT"></a><span class="term">CC_SET_TIMEOUT setting timeout to %1ms</span></dt><dd><p>
+Debug message. A timeout for which the program is willing to wait for a reply
+is being set.
+</p></dd><dt><a name="CC_START_READ"></a><span class="term">CC_START_READ starting asynchronous read</span></dt><dd><p>
+Debug message. From now on, when a message (or command) comes, it'll wake the
+program and the library will automatically pass it over to the correct place.
+</p></dd><dt><a name="CC_SUBSCRIBE"></a><span class="term">CC_SUBSCRIBE subscribing to communication group %1</span></dt><dd><p>
+Debug message. The program wants to receive messages addressed to this group.
+</p></dd><dt><a name="CC_TIMEOUT"></a><span class="term">CC_TIMEOUT timeout reading data from command channel</span></dt><dd><p>
+The program waited too long for data from the command channel (usually when it
+sent a query to a different program and it didn't answer for whatever reason).
+</p></dd><dt><a name="CC_UNSUBSCRIBE"></a><span class="term">CC_UNSUBSCRIBE unsubscribing from communication group %1</span></dt><dd><p>
+Debug message. The program no longer wants to receive messages addressed to
+this group.
+</p></dd><dt><a name="CC_WRITE_ERROR"></a><span class="term">CC_WRITE_ERROR error writing data to command channel (%1)</span></dt><dd><p>
+A low level error happened when the library tried to write data to the command
+channel socket.
+</p></dd><dt><a name="CC_ZERO_LENGTH"></a><span class="term">CC_ZERO_LENGTH invalid message length (0)</span></dt><dd><p>
+The library received a message length of zero, which makes no sense, since
+all messages must contain at least the envelope.
+</p></dd><dt><a name="CFGMGR_AUTOMATIC_CONFIG_DATABASE_UPDATE"></a><span class="term">CFGMGR_AUTOMATIC_CONFIG_DATABASE_UPDATE Updating configuration database from version %1 to %2</span></dt><dd><p>
+An older version of the configuration database has been found, from which
+there was an automatic upgrade path to the current version. These changes
+are now applied, and no action from the administrator is necessary.
+</p></dd><dt><a name="CFGMGR_BAD_UPDATE_RESPONSE_FROM_MODULE"></a><span class="term">CFGMGR_BAD_UPDATE_RESPONSE_FROM_MODULE Unable to parse response from module %1: %2</span></dt><dd><p>
+The configuration manager sent a configuration update to a module, but
+the module responded with an answer that could not be parsed. The answer
+message appears to be invalid JSON data, or not decodable to a string.
+This is likely to be a problem in the module in question. The update is
+assumed to have failed, and will not be stored.
+</p></dd><dt><a name="CFGMGR_CC_SESSION_ERROR"></a><span class="term">CFGMGR_CC_SESSION_ERROR Error connecting to command channel: %1</span></dt><dd><p>
+The configuration manager daemon was unable to connect to the messaging
+system. The most likely cause is that msgq is not running.
+</p></dd><dt><a name="CFGMGR_DATA_READ_ERROR"></a><span class="term">CFGMGR_DATA_READ_ERROR error reading configuration database from disk: %1</span></dt><dd><p>
+There was a problem reading the persistent configuration data as stored
+on disk. The file may be corrupted, or it is of a version from where
+there is no automatic upgrade path. The file needs to be repaired or
+removed. The configuration manager daemon will now shut down.
+</p></dd><dt><a name="CFGMGR_IOERROR_WHILE_WRITING_CONFIGURATION"></a><span class="term">CFGMGR_IOERROR_WHILE_WRITING_CONFIGURATION Unable to write configuration file; configuration not stored: %1</span></dt><dd><p>
+There was an IO error from the system while the configuration manager
+was trying to write the configuration database to disk. The specific
+error is given. The most likely cause is that the directory where
+the file is stored does not exist, or is not writable. The updated
+configuration is not stored.
+</p></dd><dt><a name="CFGMGR_OSERROR_WHILE_WRITING_CONFIGURATION"></a><span class="term">CFGMGR_OSERROR_WHILE_WRITING_CONFIGURATION Unable to write configuration file; configuration not stored: %1</span></dt><dd><p>
+There was an OS error from the system while the configuration manager
+was trying to write the configuration database to disk. The specific
+error is given. The most likely cause is that the system does not have
+write access to the configuration database file. The updated
+configuration is not stored.
+</p></dd><dt><a name="CFGMGR_STOPPED_BY_KEYBOARD"></a><span class="term">CFGMGR_STOPPED_BY_KEYBOARD keyboard interrupt, shutting down</span></dt><dd><p>
+There was a keyboard interrupt signal to stop the cfgmgr daemon. The
+daemon will now shut down.
+</p></dd><dt><a name="CMDCTL_BAD_CONFIG_DATA"></a><span class="term">CMDCTL_BAD_CONFIG_DATA error in config data: %1</span></dt><dd><p>
+There was an error reading the updated configuration data. The specific
+error is printed.
+</p></dd><dt><a name="CMDCTL_BAD_PASSWORD"></a><span class="term">CMDCTL_BAD_PASSWORD bad password for user: %1</span></dt><dd><p>
+A login attempt was made to b10-cmdctl, but the password was wrong.
+Users can be managed with the tool b10-cmdctl-usermgr.
+</p></dd><dt><a name="CMDCTL_CC_SESSION_ERROR"></a><span class="term">CMDCTL_CC_SESSION_ERROR error reading from cc channel: %1</span></dt><dd><p>
+There was a problem reading from the command and control channel. The
+most likely cause is that the message bus daemon is not running.
+</p></dd><dt><a name="CMDCTL_CC_SESSION_TIMEOUT"></a><span class="term">CMDCTL_CC_SESSION_TIMEOUT timeout on cc channel</span></dt><dd><p>
+A timeout occurred when waiting for essential data from the cc session.
+This usually occurs when b10-cfgmgr is not running or not responding.
+Since we are waiting for essential information, this is a fatal error,
+and the cmdctl daemon will now shut down.
+</p></dd><dt><a name="CMDCTL_COMMAND_ERROR"></a><span class="term">CMDCTL_COMMAND_ERROR error in command %1 to module %2: %3</span></dt><dd><p>
+An error was encountered sending the given command to the given module.
+Either there was a communication problem with the module, or the module
+was not able to process the command, and sent back an error. The
+specific error is printed in the message.
+</p></dd><dt><a name="CMDCTL_COMMAND_SENT"></a><span class="term">CMDCTL_COMMAND_SENT command '%1' to module '%2' was sent</span></dt><dd><p>
+This debug message indicates that the given command has been sent to
+the given module.
+</p></dd><dt><a name="CMDCTL_NO_SUCH_USER"></a><span class="term">CMDCTL_NO_SUCH_USER username not found in user database: %1</span></dt><dd><p>
+A login attempt was made to b10-cmdctl, but the username was not known.
+Users can be added with the tool b10-cmdctl-usermgr.
+</p></dd><dt><a name="CMDCTL_NO_USER_ENTRIES_READ"></a><span class="term">CMDCTL_NO_USER_ENTRIES_READ failed to read user information, all users will be denied</span></dt><dd><p>
+The b10-cmdctl daemon was unable to find any user data in the user
+database file. Either it was unable to read the file (in which case
+this message follows a message CMDCTL_USER_DATABASE_READ_ERROR
+containing a specific error), or the file was empty. Users can be added
+with the tool b10-cmdctl-usermgr.
+</p></dd><dt><a name="CMDCTL_SEND_COMMAND"></a><span class="term">CMDCTL_SEND_COMMAND sending command %1 to module %2</span></dt><dd><p>
+This debug message indicates that the given command is being sent to
+the given module.
+</p></dd><dt><a name="CMDCTL_SSL_SETUP_FAILURE_USER_DENIED"></a><span class="term">CMDCTL_SSL_SETUP_FAILURE_USER_DENIED failed to create an SSL connection (user denied): %1</span></dt><dd><p>
+The user was denied because the SSL connection could not successfully
+be set up. The specific error is given in the log message. Possible
+causes may be that the SSL request itself was bad, or the local key or
+certificate file could not be read.
+</p></dd><dt><a name="CMDCTL_STOPPED_BY_KEYBOARD"></a><span class="term">CMDCTL_STOPPED_BY_KEYBOARD keyboard interrupt, shutting down</span></dt><dd><p>
+There was a keyboard interrupt signal to stop the cmdctl daemon. The
+daemon will now shut down.
+</p></dd><dt><a name="CMDCTL_UNCAUGHT_EXCEPTION"></a><span class="term">CMDCTL_UNCAUGHT_EXCEPTION uncaught exception: %1</span></dt><dd><p>
+The b10-cmdctl daemon encountered an uncaught exception and
+will now shut down. This is indicative of a programming error and
+should not happen under normal circumstances. The exception message
+is printed.
+</p></dd><dt><a name="CMDCTL_USER_DATABASE_READ_ERROR"></a><span class="term">CMDCTL_USER_DATABASE_READ_ERROR failed to read user database file %1: %2</span></dt><dd><p>
+The b10-cmdctl daemon was unable to read the user database file. The
+file may be unreadable for the daemon, or it may be corrupted. In the
+latter case, it can be recreated with b10-cmdctl-usermgr. The specific
+error is printed in the log message.
</p></dd><dt><a name="CONFIG_CCSESSION_MSG"></a><span class="term">CONFIG_CCSESSION_MSG error in CC session message: %1</span></dt><dd><p>
There was a problem with an incoming message on the command and control
channel. The message does not appear to be a valid command, and is
@@ -65,77 +662,152 @@ missing a required element or contains an unknown data format. This
most likely means that another BIND10 module is sending a bad message.
The message itself is ignored by this module.
</p></dd><dt><a name="CONFIG_CCSESSION_MSG_INTERNAL"></a><span class="term">CONFIG_CCSESSION_MSG_INTERNAL error handling CC session message: %1</span></dt><dd><p>
-There was an internal problem handling an incoming message on the
-command and control channel. An unexpected exception was thrown. This
-most likely points to an internal inconsistency in the module code. The
-exception message is appended to the log error, and the module will
-continue to run, but will not send back an answer.
-</p></dd><dt><a name="CONFIG_FOPEN_ERR"></a><span class="term">CONFIG_FOPEN_ERR error opening %1: %2</span></dt><dd><p>
-There was an error opening the given file.
-</p></dd><dt><a name="CONFIG_JSON_PARSE"></a><span class="term">CONFIG_JSON_PARSE JSON parse error in %1: %2</span></dt><dd><p>
-There was a parse error in the JSON file. The given file does not appear
-to be in valid JSON format. Please verify that the filename is correct
-and that the contents are valid JSON.
-</p></dd><dt><a name="CONFIG_MANAGER_CONFIG"></a><span class="term">CONFIG_MANAGER_CONFIG error getting configuration from cfgmgr: %1</span></dt><dd><p>
+There was an internal problem handling an incoming message on the command
+and control channel. An unexpected exception was thrown, details of
+which are appended to the message. The module will continue to run,
+but will not send back an answer.
+</p><p>
+The most likely cause of this error is a programming error. Please raise
+a bug report.
+</p></dd><dt><a name="CONFIG_GET_FAIL"></a><span class="term">CONFIG_GET_FAIL error getting configuration from cfgmgr: %1</span></dt><dd><p>
The configuration manager returned an error when this module requested
the configuration. The full error message answer from the configuration
manager is appended to the log error. The most likely cause is that
the module is of a different (command specification) version than the
running configuration manager.
-</p></dd><dt><a name="CONFIG_MANAGER_MOD_SPEC"></a><span class="term">CONFIG_MANAGER_MOD_SPEC module specification not accepted by cfgmgr: %1</span></dt><dd><p>
-The module specification file for this module was rejected by the
-configuration manager. The full error message answer from the
-configuration manager is appended to the log error. The most likely
-cause is that the module is of a different (specification file) version
-than the running configuration manager.
-</p></dd><dt><a name="CONFIG_MODULE_SPEC"></a><span class="term">CONFIG_MODULE_SPEC module specification error in %1: %2</span></dt><dd><p>
-The given file does not appear to be a valid specification file. Please
-verify that the filename is correct and that its contents are a valid
-BIND10 module specification.
+</p></dd><dt><a name="CONFIG_GET_FAILED"></a><span class="term">CONFIG_GET_FAILED error getting configuration from cfgmgr: %1</span></dt><dd><p>
+The configuration manager returned an error response when the module
+requested its configuration. The full error message answer from the
+configuration manager is appended to the log error.
+</p></dd><dt><a name="CONFIG_JSON_PARSE"></a><span class="term">CONFIG_JSON_PARSE JSON parse error in %1: %2</span></dt><dd><p>
+There was an error parsing the JSON file. The given file does not appear
+to be in valid JSON format. Please verify that the filename is correct
+and that the contents are valid JSON.
+</p></dd><dt><a name="CONFIG_LOG_CONFIG_ERRORS"></a><span class="term">CONFIG_LOG_CONFIG_ERRORS error(s) in logging configuration: %1</span></dt><dd><p>
+There was a logging configuration update, but the internal validator
+for logging configuration found that it contained errors. The errors
+are shown, and the update is ignored.
+</p></dd><dt><a name="CONFIG_LOG_EXPLICIT"></a><span class="term">CONFIG_LOG_EXPLICIT will use logging configuration for explicitly-named logger %1</span></dt><dd><p>
+This is a debug message. When processing the "loggers" part of the
+configuration file, the configuration library found an entry for the named
+logger that matches the logger specification for the program. The logging
+configuration for the program will be updated with the information.
+</p></dd><dt><a name="CONFIG_LOG_IGNORE_EXPLICIT"></a><span class="term">CONFIG_LOG_IGNORE_EXPLICIT ignoring logging configuration for explicitly-named logger %1</span></dt><dd><p>
+This is a debug message. When processing the "loggers" part of the
+configuration file, the configuration library found an entry for the
+named logger. As this does not match the logger specification for the
+program, it has been ignored.
+</p></dd><dt><a name="CONFIG_LOG_IGNORE_WILD"></a><span class="term">CONFIG_LOG_IGNORE_WILD ignoring logging configuration for wildcard logger %1</span></dt><dd><p>
+This is a debug message. When processing the "loggers" part of the
+configuration file, the configuration library found the named wildcard
+entry (one containing the "*" character) that matched a logger already
+matched by an explicitly named entry. The configuration is ignored.
+</p></dd><dt><a name="CONFIG_LOG_WILD_MATCH"></a><span class="term">CONFIG_LOG_WILD_MATCH will use logging configuration for wildcard logger %1</span></dt><dd><p>
+This is a debug message. When processing the "loggers" part of
+the configuration file, the configuration library found the named
+wildcard entry (one containing the "*" character) that matches a logger
+specification in the program. The logging configuration for the program
+will be updated with the information.
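+As a rough illustration of the precedence described by the four CONFIG_LOG_*
+messages above, the following Python sketch (with invented names; it is not
+the actual BIND 10 configuration code) shows an explicitly-named logger entry
+taking precedence over a wildcard ("*") entry:
+
+    # Hypothetical sketch of explicit-vs-wildcard logger matching.
+    import fnmatch
+
+    def select_logger_config(program_logger, configured_loggers):
+        """Return the configuration entry that applies to program_logger."""
+        # An explicitly-named entry always wins (CONFIG_LOG_EXPLICIT).
+        if program_logger in configured_loggers:
+            return configured_loggers[program_logger]
+        # Otherwise fall back to a matching wildcard entry
+        # (CONFIG_LOG_WILD_MATCH); other entries are ignored
+        # (CONFIG_LOG_IGNORE_EXPLICIT / CONFIG_LOG_IGNORE_WILD).
+        for name, config in configured_loggers.items():
+            if "*" in name and fnmatch.fnmatch(program_logger, name):
+                return config
+        return None
+
+    # Example: the explicit "Resolver" entry is chosen over "*".
+    print(select_logger_config("Resolver", {"*": {"severity": "WARN"},
+                                            "Resolver": {"severity": "DEBUG"}}))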
+</p></dd><dt><a name="CONFIG_MOD_SPEC_FORMAT"></a><span class="term">CONFIG_MOD_SPEC_FORMAT module specification error in %1: %2</span></dt><dd><p>
+The given file does not appear to be a valid specification file: details
+are included in the message. Please verify that the filename is correct
+and that its contents are a valid BIND10 module specification.
+</p></dd><dt><a name="CONFIG_MOD_SPEC_REJECT"></a><span class="term">CONFIG_MOD_SPEC_REJECT module specification rejected by cfgmgr: %1</span></dt><dd><p>
+The specification file for this module was rejected by the configuration
+manager. The full error message answer from the configuration manager is
+appended to the log error. The most likely cause is that the module is of
+a different (specification file) version than the running configuration
+manager.
+</p></dd><dt><a name="CONFIG_OPEN_FAIL"></a><span class="term">CONFIG_OPEN_FAIL error opening %1: %2</span></dt><dd><p>
+There was an error opening the given file. The reason for the failure
+is included in the message.
</p></dd><dt><a name="DATASRC_CACHE_CREATE"></a><span class="term">DATASRC_CACHE_CREATE creating the hotspot cache</span></dt><dd><p>
-Debug information that the hotspot cache was created at startup.
+This is a debug message issued during startup when the hotspot cache
+is created.
</p></dd><dt><a name="DATASRC_CACHE_DESTROY"></a><span class="term">DATASRC_CACHE_DESTROY destroying the hotspot cache</span></dt><dd><p>
Debug information. The hotspot cache is being destroyed.
-</p></dd><dt><a name="DATASRC_CACHE_DISABLE"></a><span class="term">DATASRC_CACHE_DISABLE disabling the cache</span></dt><dd><p>
-The hotspot cache is disabled from now on. It is not going to store
-information or return anything.
-</p></dd><dt><a name="DATASRC_CACHE_ENABLE"></a><span class="term">DATASRC_CACHE_ENABLE enabling the cache</span></dt><dd><p>
-The hotspot cache is enabled from now on.
-</p></dd><dt><a name="DATASRC_CACHE_EXPIRED"></a><span class="term">DATASRC_CACHE_EXPIRED the item '%1' is expired</span></dt><dd><p>
-Debug information. There was an attempt to look up an item in the hotspot
-cache. And the item was actually there, but it was too old, so it was removed
-instead and nothing is reported (the external behaviour is the same as with
-CACHE_NOT_FOUND).
+</p></dd><dt><a name="DATASRC_CACHE_DISABLE"></a><span class="term">DATASRC_CACHE_DISABLE disabling the hotspot cache</span></dt><dd><p>
+A debug message issued when the hotspot cache is disabled.
+</p></dd><dt><a name="DATASRC_CACHE_ENABLE"></a><span class="term">DATASRC_CACHE_ENABLE enabling the hotspot cache</span></dt><dd><p>
+A debug message issued when the hotspot cache is enabled.
+</p></dd><dt><a name="DATASRC_CACHE_EXPIRED"></a><span class="term">DATASRC_CACHE_EXPIRED item '%1' in the hotspot cache has expired</span></dt><dd><p>
+A debug message issued when a hotspot cache lookup located the item but it
+had expired. The item was removed and the program proceeded as if the item
+had not been found.
</p></dd><dt><a name="DATASRC_CACHE_FOUND"></a><span class="term">DATASRC_CACHE_FOUND the item '%1' was found</span></dt><dd><p>
-Debug information. An item was successfully looked up in the hotspot cache.
-</p></dd><dt><a name="DATASRC_CACHE_FULL"></a><span class="term">DATASRC_CACHE_FULL cache is full, dropping oldest</span></dt><dd><p>
+Debug information. An item was successfully located in the hotspot cache.
+</p></dd><dt><a name="DATASRC_CACHE_FULL"></a><span class="term">DATASRC_CACHE_FULL hotspot cache is full, dropping oldest</span></dt><dd><p>
Debug information. After inserting an item into the hotspot cache, the
maximum number of items was exceeded, so the least recently used item will
be dropped. This should be directly followed by CACHE_REMOVE.
-</p></dd><dt><a name="DATASRC_CACHE_INSERT"></a><span class="term">DATASRC_CACHE_INSERT inserting item '%1' into the cache</span></dt><dd><p>
-Debug information. It means a new item is being inserted into the hotspot
+</p></dd><dt><a name="DATASRC_CACHE_INSERT"></a><span class="term">DATASRC_CACHE_INSERT inserting item '%1' into the hotspot cache</span></dt><dd><p>
+A debug message indicating that a new item is being inserted into the hotspot
cache.
-</p></dd><dt><a name="DATASRC_CACHE_NOT_FOUND"></a><span class="term">DATASRC_CACHE_NOT_FOUND the item '%1' was not found</span></dt><dd><p>
-Debug information. It was attempted to look up an item in the hotspot cache,
-but it is not there.
-</p></dd><dt><a name="DATASRC_CACHE_OLD_FOUND"></a><span class="term">DATASRC_CACHE_OLD_FOUND older instance of cache item found, replacing</span></dt><dd><p>
+</p></dd><dt><a name="DATASRC_CACHE_NOT_FOUND"></a><span class="term">DATASRC_CACHE_NOT_FOUND the item '%1' was not found in the hotspot cache</span></dt><dd><p>
+A debug message issued when the hotspot cache was searched for the specified
+item but it was not found.
+</p></dd><dt><a name="DATASRC_CACHE_OLD_FOUND"></a><span class="term">DATASRC_CACHE_OLD_FOUND older instance of hotspot cache item '%1' found, replacing</span></dt><dd><p>
Debug information. While inserting an item into the hotspot cache, an older
-instance of an item with the same name was found. The old instance will be
-removed. This should be directly followed by CACHE_REMOVE.
-</p></dd><dt><a name="DATASRC_CACHE_REMOVE"></a><span class="term">DATASRC_CACHE_REMOVE removing '%1' from the cache</span></dt><dd><p>
+instance of an item with the same name was found; the old instance will be
+removed. This will be directly followed by CACHE_REMOVE.
+</p></dd><dt><a name="DATASRC_CACHE_REMOVE"></a><span class="term">DATASRC_CACHE_REMOVE removing '%1' from the hotspot cache</span></dt><dd><p>
Debug information. An item is being removed from the hotspot cache.
-</p></dd><dt><a name="DATASRC_CACHE_SLOTS"></a><span class="term">DATASRC_CACHE_SLOTS setting the cache size to '%1', dropping '%2' items</span></dt><dd><p>
+</p></dd><dt><a name="DATASRC_CACHE_SLOTS"></a><span class="term">DATASRC_CACHE_SLOTS setting the hotspot cache size to '%1', dropping '%2' items</span></dt><dd><p>
The maximum allowed number of items of the hotspot cache is set to the given
number. If there are too many, some of them will be dropped. The size of 0
means no limit.
+</p></dd><dt><a name="DATASRC_DATABASE_FIND_ERROR"></a><span class="term">DATASRC_DATABASE_FIND_ERROR error retrieving data from datasource %1: %2</span></dt><dd><p>
+There was an internal error while reading data from a datasource. This can
+mean either that the specific data source implementation is not behaving
+correctly or that the data it provides is invalid. The current search is
+aborted. The error message contains specific information about the error.
+</p></dd><dt><a name="DATASRC_DATABASE_FIND_RECORDS"></a><span class="term">DATASRC_DATABASE_FIND_RECORDS looking in datasource %1 for record %2/%3</span></dt><dd><p>
+Debug information. The database data source is looking up records with the given
+name and type in the database.
+</p></dd><dt><a name="DATASRC_DATABASE_FIND_TTL_MISMATCH"></a><span class="term">DATASRC_DATABASE_FIND_TTL_MISMATCH TTL values differ in %1 for elements of %2/%3/%4, setting to %5</span></dt><dd><p>
+The datasource backend provided resource records for the given RRset with
+different TTL values. The TTL of the RRset is set to the lowest value, which
+is printed in the log message.
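+For example (an illustrative sketch, not the data source code): if the
+backend returns records for one RRset with TTLs of 3600 and 300, the RRset
+is given the lower value:
+
+    record_ttls = [3600, 300]      # TTLs reported by the backend
+    rrset_ttl = min(record_ttls)   # value used for the RRset and logged
+    print(rrset_ttl)               # 300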
+</p></dd><dt><a name="DATASRC_DATABASE_FIND_UNCAUGHT_ERROR"></a><span class="term">DATASRC_DATABASE_FIND_UNCAUGHT_ERROR uncaught general error retrieving data from datasource %1: %2</span></dt><dd><p>
+There was an uncaught general exception while reading data from a datasource.
+This most likely points to a logic error in the code, and can be considered a
+bug. The current search is aborted. Specific information about the exception is
+printed in this error message.
+</p></dd><dt><a name="DATASRC_DATABASE_FIND_UNCAUGHT_ISC_ERROR"></a><span class="term">DATASRC_DATABASE_FIND_UNCAUGHT_ISC_ERROR uncaught error retrieving data from datasource %1: %2</span></dt><dd><p>
+There was an uncaught ISC exception while reading data from a datasource. This
+most likely points to a logic error in the code, and can be considered a bug.
+The current search is aborted. Specific information about the exception is
+printed in this error message.
+</p></dd><dt><a name="DATASRC_DATABASE_FOUND_DELEGATION"></a><span class="term">DATASRC_DATABASE_FOUND_DELEGATION Found delegation at %2 in %1</span></dt><dd><p>
+When searching for a domain, the program encountered a delegation to a
+different zone at the given domain name. It will return the delegation instead.
+</p></dd><dt><a name="DATASRC_DATABASE_FOUND_DELEGATION_EXACT"></a><span class="term">DATASRC_DATABASE_FOUND_DELEGATION_EXACT Found delegation at %2 (exact match) in %1</span></dt><dd><p>
+The program found the requested domain, but it is a delegation point to a
+different zone, so this server is not authoritative for the domain name.
+It will return the NS record instead.
+</p></dd><dt><a name="DATASRC_DATABASE_FOUND_DNAME"></a><span class="term">DATASRC_DATABASE_FOUND_DNAME Found DNAME at %2 in %1</span></dt><dd><p>
+When searching for a domain, the program encountered a DNAME redirection to a
+different place in the domain space at the given domain name. It will return
+the DNAME instead.
+</p></dd><dt><a name="DATASRC_DATABASE_FOUND_NXDOMAIN"></a><span class="term">DATASRC_DATABASE_FOUND_NXDOMAIN search in datasource %1 resulted in NXDOMAIN for %2/%3/%4</span></dt><dd><p>
+The data returned by the database backend did not contain any data for the given
+domain name, class and type.
+</p></dd><dt><a name="DATASRC_DATABASE_FOUND_NXRRSET"></a><span class="term">DATASRC_DATABASE_FOUND_NXRRSET search in datasource %1 resulted in NXRRSET for %2/%3/%4</span></dt><dd><p>
+The data returned by the database backend contained data for the given domain
+name and class, but not for the given type.
+</p></dd><dt><a name="DATASRC_DATABASE_FOUND_RRSET"></a><span class="term">DATASRC_DATABASE_FOUND_RRSET search in datasource %1 resulted in RRset %2</span></dt><dd><p>
+The data returned by the database backend contained data for the given domain
+name, and it either matches the requested type or is of another relevant type.
+The RRset that is returned is printed in the message.
</p></dd><dt><a name="DATASRC_DO_QUERY"></a><span class="term">DATASRC_DO_QUERY handling query for '%1/%2'</span></dt><dd><p>
-Debug information. We're processing some internal query for given name and
-type.
+A debug message indicating that a query for the given name and RR type is being
+processed.
</p></dd><dt><a name="DATASRC_MEM_ADD_RRSET"></a><span class="term">DATASRC_MEM_ADD_RRSET adding RRset '%1/%2' into zone '%3'</span></dt><dd><p>
Debug information. An RRset is being added to the in-memory data source.
</p></dd><dt><a name="DATASRC_MEM_ADD_WILDCARD"></a><span class="term">DATASRC_MEM_ADD_WILDCARD adding wildcards for '%1'</span></dt><dd><p>
-Debug information. Some special marks above each * in wildcard name are needed.
-They are being added now for this name.
+This is a debug message issued during the processing of a wildcard
+name. The internal domain name tree is scanned and some nodes are
+specially marked to allow the wildcard lookup to succeed.
</p></dd><dt><a name="DATASRC_MEM_ADD_ZONE"></a><span class="term">DATASRC_MEM_ADD_ZONE adding zone '%1/%2'</span></dt><dd><p>
Debug information. A zone is being added into the in-memory data source.
</p></dd><dt><a name="DATASRC_MEM_ANY_SUCCESS"></a><span class="term">DATASRC_MEM_ANY_SUCCESS ANY query for '%1' successful</span></dt><dd><p>
@@ -146,7 +818,7 @@ Debug information. The requested domain is an alias to a different domain,
returning the CNAME instead.
</p></dd><dt><a name="DATASRC_MEM_CNAME_COEXIST"></a><span class="term">DATASRC_MEM_CNAME_COEXIST can't add data to CNAME in domain '%1'</span></dt><dd><p>
This is the same problem as in MEM_CNAME_TO_NONEMPTY, but it happened the
-other way around -- adding some outher data to CNAME.
+other way around -- adding some other data to CNAME.
</p></dd><dt><a name="DATASRC_MEM_CNAME_TO_NONEMPTY"></a><span class="term">DATASRC_MEM_CNAME_TO_NONEMPTY can't add CNAME to domain with other data in '%1'</span></dt><dd><p>
Someone or something tried to add a CNAME into a domain that already contains
some other data. But the protocol forbids coexistence of CNAME with anything
@@ -164,10 +836,10 @@ encountered on the way. This may lead to redirection to a different domain and
stop the search.
</p></dd><dt><a name="DATASRC_MEM_DNAME_FOUND"></a><span class="term">DATASRC_MEM_DNAME_FOUND DNAME found at '%1'</span></dt><dd><p>
Debug information. A DNAME was found instead of the requested information.
-</p></dd><dt><a name="DATASRC_MEM_DNAME_NS"></a><span class="term">DATASRC_MEM_DNAME_NS dNAME and NS can't coexist in non-apex domain '%1'</span></dt><dd><p>
-It was requested for DNAME and NS records to be put into the same domain
-which is not the apex (the top of the zone). This is forbidden by RFC
-2672, section 3. This indicates a problem with provided data.
+</p></dd><dt><a name="DATASRC_MEM_DNAME_NS"></a><span class="term">DATASRC_MEM_DNAME_NS DNAME and NS can't coexist in non-apex domain '%1'</span></dt><dd><p>
+A request was made for DNAME and NS records to be put into the same
+domain which is not the apex (the top of the zone). This is forbidden
+by RFC 2672 (section 3) and indicates a problem with provided data.
</p></dd><dt><a name="DATASRC_MEM_DOMAIN_EMPTY"></a><span class="term">DATASRC_MEM_DOMAIN_EMPTY requested domain '%1' is empty</span></dt><dd><p>
Debug information. The requested domain exists in the tree of domains, but
it is empty. Therefore it doesn't contain the requested resource type.
@@ -186,7 +858,7 @@ Debug information. A zone object for this zone is being searched for in the
in-memory data source.
</p></dd><dt><a name="DATASRC_MEM_LOAD"></a><span class="term">DATASRC_MEM_LOAD loading zone '%1' from file '%2'</span></dt><dd><p>
Debug information. The content of master file is being loaded into the memory.
-</p></dd><dt><a name="DATASRC_MEM_NOTFOUND"></a><span class="term">DATASRC_MEM_NOTFOUND requested domain '%1' not found</span></dt><dd><p>
+</p></dd><dt><a name="DATASRC_MEM_NOT_FOUND"></a><span class="term">DATASRC_MEM_NOT_FOUND requested domain '%1' not found</span></dt><dd><p>
Debug information. The requested domain does not exist.
</p></dd><dt><a name="DATASRC_MEM_NS_ENCOUNTERED"></a><span class="term">DATASRC_MEM_NS_ENCOUNTERED encountered a NS</span></dt><dd><p>
Debug information. While searching for the requested domain, a NS was
@@ -222,21 +894,21 @@ destroyed.
Debug information. A domain above wildcard was reached, but there's something
below the requested domain. Therefore the wildcard doesn't apply here. This
behaviour is specified by RFC 1034, section 4.3.3
-</p></dd><dt><a name="DATASRC_MEM_WILDCARD_DNAME"></a><span class="term">DATASRC_MEM_WILDCARD_DNAME dNAME record in wildcard domain '%1'</span></dt><dd><p>
+</p></dd><dt><a name="DATASRC_MEM_WILDCARD_DNAME"></a><span class="term">DATASRC_MEM_WILDCARD_DNAME DNAME record in wildcard domain '%1'</span></dt><dd><p>
The software refuses to load DNAME records into a wildcard domain. It isn't
explicitly forbidden, but the protocol is ambiguous about how this should
behave and BIND 9 refuses that as well. Please describe your intention using
different tools.
-</p></dd><dt><a name="DATASRC_MEM_WILDCARD_NS"></a><span class="term">DATASRC_MEM_WILDCARD_NS nS record in wildcard domain '%1'</span></dt><dd><p>
+</p></dd><dt><a name="DATASRC_MEM_WILDCARD_NS"></a><span class="term">DATASRC_MEM_WILDCARD_NS NS record in wildcard domain '%1'</span></dt><dd><p>
The software refuses to load NS records into a wildcard domain. It isn't
explicitly forbidden, but the protocol is ambiguous about how this should
behave and BIND 9 refuses that as well. Please describe your intention using
different tools.
</p></dd><dt><a name="DATASRC_META_ADD"></a><span class="term">DATASRC_META_ADD adding a data source into meta data source</span></dt><dd><p>
-Debug information. Yet another data source is being added into the meta data
-source. (probably at startup or reconfiguration)
+This is a debug message issued during startup or reconfiguration.
+Another data source is being added into the meta data source.
</p></dd><dt><a name="DATASRC_META_ADD_CLASS_MISMATCH"></a><span class="term">DATASRC_META_ADD_CLASS_MISMATCH mismatch between classes '%1' and '%2'</span></dt><dd><p>
-It was attempted to add a data source into a meta data source. But their
+An attempt was made to add a data source into a meta data source, but their
classes do not match.
</p></dd><dt><a name="DATASRC_META_REMOVE"></a><span class="term">DATASRC_META_REMOVE removing data source from meta data source</span></dt><dd><p>
Debug information. A data source is being removed from meta data source.
@@ -257,10 +929,10 @@ specific error already.
</p></dd><dt><a name="DATASRC_QUERY_BAD_REFERRAL"></a><span class="term">DATASRC_QUERY_BAD_REFERRAL bad referral to '%1'</span></dt><dd><p>
The domain lives in another zone. But it is not possible to generate referral
information for it.
-</p></dd><dt><a name="DATASRC_QUERY_CACHED"></a><span class="term">DATASRC_QUERY_CACHED data for %1/%2 found in cache</span></dt><dd><p>
+</p></dd><dt><a name="DATASRC_QUERY_CACHED"></a><span class="term">DATASRC_QUERY_CACHED data for %1/%2 found in hotspot cache</span></dt><dd><p>
Debug information. The requested data were found in the hotspot cache, so
no query is sent to the real data source.
-</p></dd><dt><a name="DATASRC_QUERY_CHECK_CACHE"></a><span class="term">DATASRC_QUERY_CHECK_CACHE checking cache for '%1/%2'</span></dt><dd><p>
+</p></dd><dt><a name="DATASRC_QUERY_CHECK_CACHE"></a><span class="term">DATASRC_QUERY_CHECK_CACHE checking hotspot cache for '%1/%2'</span></dt><dd><p>
Debug information. While processing a query, lookup to the hotspot cache
is being made.
</p></dd><dt><a name="DATASRC_QUERY_COPY_AUTH"></a><span class="term">DATASRC_QUERY_COPY_AUTH copying authoritative section into message</span></dt><dd><p>
@@ -269,20 +941,19 @@ response message.
</p></dd><dt><a name="DATASRC_QUERY_DELEGATION"></a><span class="term">DATASRC_QUERY_DELEGATION looking for delegation on the path to '%1'</span></dt><dd><p>
Debug information. The software is trying to identify delegation points on the
way down to the given domain.
-</p></dd><dt><a name="DATASRC_QUERY_EMPTY_CNAME"></a><span class="term">DATASRC_QUERY_EMPTY_CNAME cNAME at '%1' is empty</span></dt><dd><p>
-There was an CNAME and it was being followed. But it contains no records,
-so there's nowhere to go. There will be no answer. This indicates a problem
-with supplied data.
-We tried to follow
+</p></dd><dt><a name="DATASRC_QUERY_EMPTY_CNAME"></a><span class="term">DATASRC_QUERY_EMPTY_CNAME CNAME at '%1' is empty</span></dt><dd><p>
+A CNAME chain was being followed and an entry was found that pointed
+to a domain name that had no RRsets associated with it. As a result,
+the query cannot be answered. This indicates a problem with supplied data.
</p></dd><dt><a name="DATASRC_QUERY_EMPTY_DNAME"></a><span class="term">DATASRC_QUERY_EMPTY_DNAME the DNAME on '%1' is empty</span></dt><dd><p>
During an attempt to synthesize a CNAME from this DNAME it was discovered that
the DNAME is empty (it has no records). This indicates a problem with the
supplied data.
</p></dd><dt><a name="DATASRC_QUERY_FAIL"></a><span class="term">DATASRC_QUERY_FAIL query failed</span></dt><dd><p>
Some subtask of query processing failed. The reason should have been reported
-already. We are returning SERVFAIL.
+already and a SERVFAIL will be returned to the querying system.
</p></dd><dt><a name="DATASRC_QUERY_FOLLOW_CNAME"></a><span class="term">DATASRC_QUERY_FOLLOW_CNAME following CNAME at '%1'</span></dt><dd><p>
-Debug information. The domain is a CNAME (or a DNAME and we created a CNAME
-for it already), so it's being followed.
+Debug information. The domain is a CNAME (or a DNAME and a CNAME for it
+has already been created) and the search is following this chain.
</p></dd><dt><a name="DATASRC_QUERY_GET_MX_ADDITIONAL"></a><span class="term">DATASRC_QUERY_GET_MX_ADDITIONAL addition of A/AAAA for '%1' requested by MX '%2'</span></dt><dd><p>
Debug information. While processing a query, a MX record was met. It
references the mentioned address, so A/AAAA records for it are looked up
@@ -301,12 +972,12 @@ operation code.
</p></dd><dt><a name="DATASRC_QUERY_IS_AUTH"></a><span class="term">DATASRC_QUERY_IS_AUTH auth query (%1/%2)</span></dt><dd><p>
Debug information. The last DO_QUERY is an auth query.
</p></dd><dt><a name="DATASRC_QUERY_IS_GLUE"></a><span class="term">DATASRC_QUERY_IS_GLUE glue query (%1/%2)</span></dt><dd><p>
-Debug information. The last DO_QUERY is query for glue addresses.
+Debug information. The last DO_QUERY is a query for glue addresses.
</p></dd><dt><a name="DATASRC_QUERY_IS_NOGLUE"></a><span class="term">DATASRC_QUERY_IS_NOGLUE query for non-glue addresses (%1/%2)</span></dt><dd><p>
-Debug information. The last DO_QUERY is query for addresses that are not
+Debug information. The last DO_QUERY is a query for addresses that are not
glue.
</p></dd><dt><a name="DATASRC_QUERY_IS_REF"></a><span class="term">DATASRC_QUERY_IS_REF query for referral (%1/%2)</span></dt><dd><p>
-Debug information. The last DO_QUERY is query for referral information.
+Debug information. The last DO_QUERY is a query for referral information.
</p></dd><dt><a name="DATASRC_QUERY_IS_SIMPLE"></a><span class="term">DATASRC_QUERY_IS_SIMPLE simple query (%1/%2)</span></dt><dd><p>
Debug information. The last DO_QUERY is a simple query.
</p></dd><dt><a name="DATASRC_QUERY_MISPLACED_TASK"></a><span class="term">DATASRC_QUERY_MISPLACED_TASK task of this type should not be here</span></dt><dd><p>
@@ -324,10 +995,10 @@ does not have one. This indicates problem with provided data.
The underlying data source failed to answer the no-glue query. 1 means some
error, 2 is not implemented. The data source should have logged the specific
error already.
-</p></dd><dt><a name="DATASRC_QUERY_NO_CACHE_ANY_AUTH"></a><span class="term">DATASRC_QUERY_NO_CACHE_ANY_AUTH ignoring cache for ANY query (%1/%2 in %3 class)</span></dt><dd><p>
+</p></dd><dt><a name="DATASRC_QUERY_NO_CACHE_ANY_AUTH"></a><span class="term">DATASRC_QUERY_NO_CACHE_ANY_AUTH ignoring hotspot cache for ANY query (%1/%2 in %3 class)</span></dt><dd><p>
Debug information. The hotspot cache is ignored for authoritative ANY queries
for consistency reasons.
-</p></dd><dt><a name="DATASRC_QUERY_NO_CACHE_ANY_SIMPLE"></a><span class="term">DATASRC_QUERY_NO_CACHE_ANY_SIMPLE ignoring cache for ANY query (%1/%2 in %3 class)</span></dt><dd><p>
+</p></dd><dt><a name="DATASRC_QUERY_NO_CACHE_ANY_SIMPLE"></a><span class="term">DATASRC_QUERY_NO_CACHE_ANY_SIMPLE ignoring hotspot cache for ANY query (%1/%2 in %3 class)</span></dt><dd><p>
Debug information. The hotspot cache is ignored for ANY queries for consistency
reasons.
</p></dd><dt><a name="DATASRC_QUERY_NO_DS_NSEC"></a><span class="term">DATASRC_QUERY_NO_DS_NSEC there's no DS record in the '%1' zone</span></dt><dd><p>
@@ -341,7 +1012,7 @@ Lookup of domain failed because the data have no zone that contain the
domain. Maybe someone sent a query to the wrong server for some reason.
</p></dd><dt><a name="DATASRC_QUERY_PROCESS"></a><span class="term">DATASRC_QUERY_PROCESS processing query '%1/%2' in the '%3' class</span></dt><dd><p>
Debug information. A sure query is being processed now.
-</p></dd><dt><a name="DATASRC_QUERY_PROVENX_FAIL"></a><span class="term">DATASRC_QUERY_PROVENX_FAIL unable to prove nonexistence of '%1'</span></dt><dd><p>
+</p></dd><dt><a name="DATASRC_QUERY_PROVE_NX_FAIL"></a><span class="term">DATASRC_QUERY_PROVE_NX_FAIL unable to prove nonexistence of '%1'</span></dt><dd><p>
The user wants DNSSEC and we discovered the entity doesn't exist (either
domain or the record). But there was an error getting NSEC/NSEC3 record
to prove the nonexistence.
@@ -357,13 +1028,13 @@ The underlying data source failed to answer the simple query. 1 means some
error, 2 is not implemented. The data source should have logged the specific
error already.
</p></dd><dt><a name="DATASRC_QUERY_SYNTH_CNAME"></a><span class="term">DATASRC_QUERY_SYNTH_CNAME synthesizing CNAME from DNAME on '%1'</span></dt><dd><p>
-Debug information. While answering a query, a DNAME was met. The DNAME itself
-will be returned, but along with it a CNAME for clients which don't understand
-DNAMEs will be synthesized.
+This is a debug message. While answering a query, a DNAME was encountered. The
+DNAME itself will be returned, along with a synthesized CNAME for clients that
+do not understand the DNAME RR.
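+The substitution involved can be sketched as follows (illustrative Python
+with invented names, not the BIND 10 implementation): the part of the query
+name below the DNAME owner is kept and the owner suffix is replaced by the
+DNAME target, giving the target of the synthesized CNAME.
+
+    # Hypothetical sketch of DNAME -> CNAME synthesis.
+    def synthesize_cname_target(qname, dname_owner, dname_target):
+        """Replace the dname_owner suffix of qname with dname_target."""
+        assert qname.endswith("." + dname_owner)
+        prefix = qname[:-(len(dname_owner) + 1)]  # labels below the DNAME
+        return prefix + "." + dname_target
+
+    # e.g. a DNAME example.com -> example.net and a query for www.example.com
+    print(synthesize_cname_target("www.example.com", "example.com",
+                                  "example.net"))   # www.example.net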
</p></dd><dt><a name="DATASRC_QUERY_TASK_FAIL"></a><span class="term">DATASRC_QUERY_TASK_FAIL task failed with %1</span></dt><dd><p>
The query subtask failed. The reason should have been reported by the subtask
already. The code is 1 for error, 2 for not implemented.
-</p></dd><dt><a name="DATASRC_QUERY_TOO_MANY_CNAMES"></a><span class="term">DATASRC_QUERY_TOO_MANY_CNAMES cNAME chain limit exceeded at '%1'</span></dt><dd><p>
+</p></dd><dt><a name="DATASRC_QUERY_TOO_MANY_CNAMES"></a><span class="term">DATASRC_QUERY_TOO_MANY_CNAMES CNAME chain limit exceeded at '%1'</span></dt><dd><p>
A CNAME led to another CNAME and it led to another, and so on. After 16
CNAMEs, the software gave up. Long CNAME chains are discouraged, and this
might possibly be a loop as well. Note that some of the CNAMEs might have
@@ -377,7 +1048,7 @@ domain is being looked for now.
</p></dd><dt><a name="DATASRC_QUERY_WILDCARD_FAIL"></a><span class="term">DATASRC_QUERY_WILDCARD_FAIL error processing wildcard for '%1'</span></dt><dd><p>
During an attempt to cover the domain by a wildcard an error happened. The
exact kind was hopefully already reported.
-</p></dd><dt><a name="DATASRC_QUERY_WILDCARD_PROVENX_FAIL"></a><span class="term">DATASRC_QUERY_WILDCARD_PROVENX_FAIL unable to prove nonexistence of '%1' (%2)</span></dt><dd><p>
+</p></dd><dt><a name="DATASRC_QUERY_WILDCARD_PROVE_NX_FAIL"></a><span class="term">DATASRC_QUERY_WILDCARD_PROVE_NX_FAIL unable to prove nonexistence of '%1' (%2)</span></dt><dd><p>
While processing a wildcard, it wasn't possible to prove nonexistence of the
given domain or record. The code is 1 for error and 2 for not implemented.
</p></dd><dt><a name="DATASRC_QUERY_WILDCARD_REFERRAL"></a><span class="term">DATASRC_QUERY_WILDCARD_REFERRAL unable to find referral info for '%1' (%2)</span></dt><dd><p>
@@ -385,15 +1056,21 @@ While processing a wildcard, a referral was met. But it wasn't possible to get
enough information for it. The code is 1 for error, 2 for not implemented.
</p></dd><dt><a name="DATASRC_SQLITE_CLOSE"></a><span class="term">DATASRC_SQLITE_CLOSE closing SQLite database</span></dt><dd><p>
Debug information. The SQLite data source is closing the database file.
-</p></dd><dt><a name="DATASRC_SQLITE_CREATE"></a><span class="term">DATASRC_SQLITE_CREATE sQLite data source created</span></dt><dd><p>
+</p></dd><dt><a name="DATASRC_SQLITE_CONNCLOSE"></a><span class="term">DATASRC_SQLITE_CONNCLOSE Closing sqlite database</span></dt><dd><p>
+The database file is no longer needed and is being closed.
+</p></dd><dt><a name="DATASRC_SQLITE_CONNOPEN"></a><span class="term">DATASRC_SQLITE_CONNOPEN Opening sqlite database file '%1'</span></dt><dd><p>
+The database file is being opened so it can start providing data.
+</p></dd><dt><a name="DATASRC_SQLITE_CREATE"></a><span class="term">DATASRC_SQLITE_CREATE SQLite data source created</span></dt><dd><p>
Debug information. An instance of SQLite data source is being created.
-</p></dd><dt><a name="DATASRC_SQLITE_DESTROY"></a><span class="term">DATASRC_SQLITE_DESTROY sQLite data source destroyed</span></dt><dd><p>
+</p></dd><dt><a name="DATASRC_SQLITE_DESTROY"></a><span class="term">DATASRC_SQLITE_DESTROY SQLite data source destroyed</span></dt><dd><p>
Debug information. An instance of SQLite data source is being destroyed.
+</p></dd><dt><a name="DATASRC_SQLITE_DROPCONN"></a><span class="term">DATASRC_SQLITE_DROPCONN SQLite3Database is being deinitialized</span></dt><dd><p>
+The object around a database connection is being destroyed.
</p></dd><dt><a name="DATASRC_SQLITE_ENCLOSURE"></a><span class="term">DATASRC_SQLITE_ENCLOSURE looking for zone containing '%1'</span></dt><dd><p>
-Debug information. The SQLite data source is trying to identify, which zone
+Debug information. The SQLite data source is trying to identify which zone
should hold this domain.
-</p></dd><dt><a name="DATASRC_SQLITE_ENCLOSURE_NOTFOUND"></a><span class="term">DATASRC_SQLITE_ENCLOSURE_NOTFOUND no zone contains it</span></dt><dd><p>
-Debug information. The last SQLITE_ENCLOSURE query was unsuccessful, there's
+</p></dd><dt><a name="DATASRC_SQLITE_ENCLOSURE_NOT_FOUND"></a><span class="term">DATASRC_SQLITE_ENCLOSURE_NOT_FOUND no zone contains '%1'</span></dt><dd><p>
+Debug information. The last SQLITE_ENCLOSURE query was unsuccessful; there's
no such zone in our data.
</p></dd><dt><a name="DATASRC_SQLITE_FIND"></a><span class="term">DATASRC_SQLITE_FIND looking for RRset '%1/%2'</span></dt><dd><p>
Debug information. The SQLite data source is looking up a resource record
@@ -417,7 +1094,7 @@ and type in the database.
Debug information. The SQLite data source is identifying if this domain is
a referral and where it goes.
</p></dd><dt><a name="DATASRC_SQLITE_FINDREF_BAD_CLASS"></a><span class="term">DATASRC_SQLITE_FINDREF_BAD_CLASS class mismatch looking for referral ('%1' and '%2')</span></dt><dd><p>
-The SQLite data source was trying to identify, if there's a referral. But
+The SQLite data source was trying to identify if there's a referral. But
it contains a different class than the query was for.
</p></dd><dt><a name="DATASRC_SQLITE_FIND_BAD_CLASS"></a><span class="term">DATASRC_SQLITE_FIND_BAD_CLASS class mismatch looking for an RRset ('%1' and '%2')</span></dt><dd><p>
The SQLite data source was looking up an RRset, but the data source contains
@@ -428,21 +1105,30 @@ source.
</p></dd><dt><a name="DATASRC_SQLITE_FIND_NSEC3_NO_ZONE"></a><span class="term">DATASRC_SQLITE_FIND_NSEC3_NO_ZONE no such zone '%1'</span></dt><dd><p>
The SQLite data source was asked to provide an NSEC3 record for the given zone.
But it doesn't contain that zone.
+</p></dd><dt><a name="DATASRC_SQLITE_NEWCONN"></a><span class="term">DATASRC_SQLITE_NEWCONN SQLite3Database is being initialized</span></dt><dd><p>
+A wrapper object to hold the database connection is being initialized.
</p></dd><dt><a name="DATASRC_SQLITE_OPEN"></a><span class="term">DATASRC_SQLITE_OPEN opening SQLite database '%1'</span></dt><dd><p>
Debug information. The SQLite data source is loading an SQLite database in
the provided file.
</p></dd><dt><a name="DATASRC_SQLITE_PREVIOUS"></a><span class="term">DATASRC_SQLITE_PREVIOUS looking for name previous to '%1'</span></dt><dd><p>
-Debug information. We're trying to look up name preceding the supplied one.
+This is a debug message. The name given was not found, so the program
+is searching for the next name higher up the hierarchy (e.g. if
+www.example.com were queried for and not found, the software searches
+for the "previous" name, example.com).
</p></dd><dt><a name="DATASRC_SQLITE_PREVIOUS_NO_ZONE"></a><span class="term">DATASRC_SQLITE_PREVIOUS_NO_ZONE no zone containing '%1'</span></dt><dd><p>
-The SQLite data source tried to identify name preceding this one. But this
-one is not contained in any zone in the data source.
+The name given was not found, so the program is searching for the next
+name higher up the hierarchy (e.g. if www.example.com were queried
+for and not found, the software searches for the "previous" name,
+example.com). However, this name is not contained in any zone in the
+data source. This is an error since it indicates a problem in the earlier
+processing of the query.
</p></dd><dt><a name="DATASRC_SQLITE_SETUP"></a><span class="term">DATASRC_SQLITE_SETUP setting up SQLite database</span></dt><dd><p>
The database for the SQLite data source was found empty. It is assumed this is
the first run and it is being initialized with the current schema. It will
still contain no data, but it will be ready for use.
-</p></dd><dt><a name="DATASRC_STATIC_BAD_CLASS"></a><span class="term">DATASRC_STATIC_BAD_CLASS static data source can handle CH only</span></dt><dd><p>
-For some reason, someone asked the static data source a query that is not in
-the CH class.
+</p></dd><dt><a name="DATASRC_STATIC_CLASS_NOT_CH"></a><span class="term">DATASRC_STATIC_CLASS_NOT_CH static data source can handle CH class only</span></dt><dd><p>
+An error message indicating that a query requesting an RR for a class other
+than CH was sent to the static data source (which only handles CH queries).
</p></dd><dt><a name="DATASRC_STATIC_CREATE"></a><span class="term">DATASRC_STATIC_CREATE creating the static datasource</span></dt><dd><p>
Debug information. The static data source (the one holding stuff like
version.bind) is being created.
@@ -452,142 +1138,229 @@ data source.
</p></dd><dt><a name="DATASRC_UNEXPECTED_QUERY_STATE"></a><span class="term">DATASRC_UNEXPECTED_QUERY_STATE unexpected query state</span></dt><dd><p>
This indicates a programming error. An internal task of unknown type was
generated.
-</p></dd><dt><a name="LOGIMPL_ABOVEDBGMAX"></a><span class="term">LOGIMPL_ABOVEDBGMAX debug level of %1 is too high and will be set to the maximum of %2</span></dt><dd><p>
-A message from the underlying logger implementation code, the debug level
-(as set by the string DEBGUGn) is above the maximum allowed value and has
-been reduced to that value.
-</p></dd><dt><a name="LOGIMPL_BADDEBUG"></a><span class="term">LOGIMPL_BADDEBUG debug string is '%1': must be of the form DEBUGn</span></dt><dd><p>
-The string indicating the extended logging level (used by the underlying
-logger implementation code) is not of the stated form. In particular,
-it starts DEBUG but does not end with an integer.
-</p></dd><dt><a name="LOGIMPL_BELOWDBGMIN"></a><span class="term">LOGIMPL_BELOWDBGMIN debug level of %1 is too low and will be set to the minimum of %2</span></dt><dd><p>
-A message from the underlying logger implementation code, the debug level
-(as set by the string DEBGUGn) is below the minimum allowed value and has
-been increased to that value.
-</p></dd><dt><a name="MSG_BADDESTINATION"></a><span class="term">MSG_BADDESTINATION unrecognized log destination: %1</span></dt><dd><p>
+</p></dd><dt><a name="LOGIMPL_ABOVE_MAX_DEBUG"></a><span class="term">LOGIMPL_ABOVE_MAX_DEBUG debug level of %1 is too high and will be set to the maximum of %2</span></dt><dd><p>
+A message from the interface to the underlying logger implementation reporting
+that the debug level (as set by an internally-created string DEBUGn, where n
+is an integer, e.g. DEBUG22) is above the maximum allowed value and has
+been reduced to that value. The appearance of this message may indicate
+a programming error - please submit a bug report.
+</p></dd><dt><a name="LOGIMPL_BAD_DEBUG_STRING"></a><span class="term">LOGIMPL_BAD_DEBUG_STRING debug string '%1' has invalid format</span></dt><dd><p>
+A message from the interface to the underlying logger implementation
+reporting that an internally-created string used to set the debug level
+is not of the correct format (it should be of the form DEBUGn, where n
+is an integer, e.g. DEBUG22). The appearance of this message indicates
+a programming error - please submit a bug report.
+</p></dd><dt><a name="LOGIMPL_BELOW_MIN_DEBUG"></a><span class="term">LOGIMPL_BELOW_MIN_DEBUG debug level of %1 is too low and will be set to the minimum of %2</span></dt><dd><p>
+A message from the interface to the underlying logger implementation reporting
+that the debug level (as set by an internally-created string DEBUGn, where n
+is an integer, e.g. DEBUG22) is below the minimum allowed value and has
+been increased to that value. The appearance of this message may indicate
+a programming error - please submit a bug report.
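+A minimal sketch of the parsing and clamping these three messages describe
+(illustrative Python; the range of 0 to 99 is assumed here for the example
+and this is not the actual logger implementation):
+
+    import re
+
+    MIN_DEBUG, MAX_DEBUG = 0, 99   # assumed range, for illustration only
+
+    def debug_level_from_string(s):
+        match = re.fullmatch(r"DEBUG(\d+)", s)
+        if match is None:
+            # cf. LOGIMPL_BAD_DEBUG_STRING
+            raise ValueError("bad debug string: " + s)
+        # cf. LOGIMPL_ABOVE_MAX_DEBUG / LOGIMPL_BELOW_MIN_DEBUG
+        return max(MIN_DEBUG, min(MAX_DEBUG, int(match.group(1))))
+
+    print(debug_level_from_string("DEBUG22"))    # 22
+    print(debug_level_from_string("DEBUG150"))   # clamped to 99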
+</p></dd><dt><a name="LOG_BAD_DESTINATION"></a><span class="term">LOG_BAD_DESTINATION unrecognized log destination: %1</span></dt><dd><p>
A logger destination value was given that was not recognized. The
destination should be one of "console", "file", or "syslog".
-</p></dd><dt><a name="MSG_BADSEVERITY"></a><span class="term">MSG_BADSEVERITY unrecognized log severity: %1</span></dt><dd><p>
+</p></dd><dt><a name="LOG_BAD_SEVERITY"></a><span class="term">LOG_BAD_SEVERITY unrecognized log severity: %1</span></dt><dd><p>
A logger severity value was given that was not recognized. The severity
-should be one of "DEBUG", "INFO", "WARN", "ERROR", or "FATAL".
-</p></dd><dt><a name="MSG_BADSTREAM"></a><span class="term">MSG_BADSTREAM bad log console output stream: %1</span></dt><dd><p>
-A log console output stream was given that was not recognized. The
-output stream should be one of "stdout", or "stderr"
-</p></dd><dt><a name="MSG_DUPLNS"></a><span class="term">MSG_DUPLNS line %1: duplicate $NAMESPACE directive found</span></dt><dd><p>
-When reading a message file, more than one $NAMESPACE directive was found. In
-this version of the code, such a condition is regarded as an error and the
-read will be abandoned.
-</p></dd><dt><a name="MSG_DUPMSGID"></a><span class="term">MSG_DUPMSGID duplicate message ID (%1) in compiled code</span></dt><dd><p>
-Indicative of a programming error, when it started up, BIND10 detected that
-the given message ID had been registered by one or more modules. (All message
-IDs should be unique throughout BIND10.) This has no impact on the operation
-of the server other that erroneous messages may be logged. (When BIND10 loads
-the message IDs (and their associated text), if a duplicate ID is found it is
-discarded. However, when the module that supplied the duplicate ID logs that
-particular message, the text supplied by the module that added the original
-ID will be output - something that may bear no relation to the condition being
-logged.
-</p></dd><dt><a name="MSG_IDNOTFND"></a><span class="term">MSG_IDNOTFND could not replace message text for '%1': no such message</span></dt><dd><p>
+should be one of "DEBUG", "INFO", "WARN", "ERROR", "FATAL" or "NONE".
+</p></dd><dt><a name="LOG_BAD_STREAM"></a><span class="term">LOG_BAD_STREAM bad log console output stream: %1</span></dt><dd><p>
+Logging has been configured so that output is written to the terminal
+(console) but the stream on which it is to be written is not recognised.
+Allowed values are "stdout" and "stderr".
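+The allowed values mentioned by LOG_BAD_DESTINATION, LOG_BAD_SEVERITY and
+LOG_BAD_STREAM can be summarised in a small validation sketch (illustrative
+Python; the real checks live in the BIND 10 logging configuration code):
+
+    VALID_DESTINATIONS = {"console", "file", "syslog"}
+    VALID_SEVERITIES = {"DEBUG", "INFO", "WARN", "ERROR", "FATAL", "NONE"}
+    VALID_STREAMS = {"stdout", "stderr"}   # only used for the console
+
+    def check_logging_values(destination, severity, stream=None):
+        if destination not in VALID_DESTINATIONS:
+            raise ValueError("unrecognized log destination: " + destination)
+        if severity not in VALID_SEVERITIES:
+            raise ValueError("unrecognized log severity: " + severity)
+        if destination == "console" and stream not in VALID_STREAMS:
+            raise ValueError("bad log console output stream: " + str(stream))
+
+    check_logging_values("console", "INFO", "stderr")   # passes silently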
+</p></dd><dt><a name="LOG_DUPLICATE_MESSAGE_ID"></a><span class="term">LOG_DUPLICATE_MESSAGE_ID duplicate message ID (%1) in compiled code</span></dt><dd><p>
+During start-up, BIND 10 detected that the given message identification
+had been defined multiple times in the BIND 10 code. This indicates a
+programming error; please submit a bug report.
+</p></dd><dt><a name="LOG_DUPLICATE_NAMESPACE"></a><span class="term">LOG_DUPLICATE_NAMESPACE line %1: duplicate $NAMESPACE directive found</span></dt><dd><p>
+When reading a message file, more than one $NAMESPACE directive was found.
+(This directive is used to set a C++ namespace when generating header
+files during software development.) Such a condition is regarded as an
+error and the read will be abandoned.
+</p></dd><dt><a name="LOG_INPUT_OPEN_FAIL"></a><span class="term">LOG_INPUT_OPEN_FAIL unable to open message file %1 for input: %2</span></dt><dd><p>
+The program was not able to open the specified input message file for
+the reason given.
+</p></dd><dt><a name="LOG_INVALID_MESSAGE_ID"></a><span class="term">LOG_INVALID_MESSAGE_ID line %1: invalid message identification '%2'</span></dt><dd><p>
+An invalid message identification (ID) has been found during the read of
+a message file. Message IDs should comprise only alphanumeric characters
+and the underscore, and should not start with a digit.
+</p></dd><dt><a name="LOG_NAMESPACE_EXTRA_ARGS"></a><span class="term">LOG_NAMESPACE_EXTRA_ARGS line %1: $NAMESPACE directive has too many arguments</span></dt><dd><p>
+The $NAMESPACE directive in a message file takes a single argument, a
+namespace in which all the generated symbol names are placed. This error
+is generated when the compiler finds a $NAMESPACE directive with more
+than one argument.
+</p></dd><dt><a name="LOG_NAMESPACE_INVALID_ARG"></a><span class="term">LOG_NAMESPACE_INVALID_ARG line %1: $NAMESPACE directive has an invalid argument ('%2')</span></dt><dd><p>
+The $NAMESPACE argument in a message file should be a valid C++ namespace.
+This message is output if the simple check on the syntax of the string
+carried out by the reader fails.
+</p></dd><dt><a name="LOG_NAMESPACE_NO_ARGS"></a><span class="term">LOG_NAMESPACE_NO_ARGS line %1: no arguments were given to the $NAMESPACE directive</span></dt><dd><p>
+The $NAMESPACE directive in a message file takes a single argument,
+a C++ namespace in which all the generated symbol names are placed.
+This error is generated when the compiler finds a $NAMESPACE directive
+with no arguments.
+</p></dd><dt><a name="LOG_NO_MESSAGE_ID"></a><span class="term">LOG_NO_MESSAGE_ID line %1: message definition line found without a message ID</span></dt><dd><p>
+Within a message file, messages are defined by lines starting with a "%".
+The rest of the line should comprise the message ID and text describing
+the message. This error indicates the message compiler found a line in
+the message file comprising just the "%" and nothing else.
+</p></dd><dt><a name="LOG_NO_MESSAGE_TEXT"></a><span class="term">LOG_NO_MESSAGE_TEXT line %1: line found containing a message ID ('%2') and no text</span></dt><dd><p>
+Within a message file, messages are defined by lines starting with a "%".
+The rest of the line should comprise the message ID and text describing
+the message. This error indicates the message compiler found a line
+in the message file comprising just the "%" and message identification,
+but no text.
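+To make the message file format referred to here concrete, the following is
+a small illustrative check of a definition line (the line contents are an
+invented example, not taken from a real BIND 10 message file); it also
+reflects the message ID rule given for LOG_INVALID_MESSAGE_ID:
+
+    import re
+
+    # "%", then a message ID (alphanumerics/underscores, not starting with
+    # a digit), then the message text.
+    MSG_LINE = re.compile(r"^%\s*([A-Za-z_][A-Za-z0-9_]*)\s+(\S.*)$")
+
+    def check_definition_line(line):
+        match = MSG_LINE.match(line)
+        if match is None:
+            raise ValueError("bad message definition line: " + repr(line))
+        return match.group(1), match.group(2)
+
+    print(check_definition_line("% EXAMPLE_ID example message text for %1"))
+    # A bare "%" (LOG_NO_MESSAGE_ID) or an ID with no text
+    # (LOG_NO_MESSAGE_TEXT) would be rejected by this check.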
+</p></dd><dt><a name="LOG_NO_SUCH_MESSAGE"></a><span class="term">LOG_NO_SUCH_MESSAGE could not replace message text for '%1': no such message</span></dt><dd><p>
During start-up a local message file was read. A line with the listed
-message identification was found in the file, but the identification is not
-one contained in the compiled-in message dictionary. Either the message
-identification has been mis-spelled in the file, or the local file was used
-for an earlier version of the software and the message with that
-identification has been removed.
+message identification was found in the file, but the identification is
+not one contained in the compiled-in message dictionary. This message
+may appear a number of times in the file, once for every such unknown
+message identification.
+</p><p>
+There may be several reasons why this message may appear:
+</p><p>
+- The message ID has been mis-spelled in the local message file.
+</p><p>
+- The program outputting the message may not use that particular message
+(e.g. it originates in a module not used by the program.)
+</p><p>
+- The local file was written for an earlier version of the BIND 10 software
+and the later version no longer generates that message.
+</p><p>
+Whatever the reason, there is no impact on the operation of BIND 10.
+</p></dd><dt><a name="LOG_OPEN_OUTPUT_FAIL"></a><span class="term">LOG_OPEN_OUTPUT_FAIL unable to open %1 for output: %2</span></dt><dd><p>
+Originating within the logging code, the program was not able to open
+the specified output file for the reason given.
+</p></dd><dt><a name="LOG_PREFIX_EXTRA_ARGS"></a><span class="term">LOG_PREFIX_EXTRA_ARGS line %1: $PREFIX directive has too many arguments</span></dt><dd><p>
+Within a message file, the $PREFIX directive takes a single argument,
+a prefix to be added to the symbol names when a C++ file is created.
+This error is generated when the compiler finds a $PREFIX directive with
+more than one argument.
+</p><p>
+Note: the $PREFIX directive is deprecated and will be removed in a future
+version of BIND 10.
+</p></dd><dt><a name="LOG_PREFIX_INVALID_ARG"></a><span class="term">LOG_PREFIX_INVALID_ARG line %1: $PREFIX directive has an invalid argument ('%2')</span></dt><dd><p>
+Within a message file, the $PREFIX directive takes a single argument,
+a prefix to be added to the symbol names when a C++ file is created.
+As such, it must adhere to restrictions on C++ symbol names (e.g. may
+only contain alphanumeric characters or underscores, and may not start
+with a digit). A $PREFIX directive was found with an argument (given
+in the message) that violates those restrictions.
</p><p>
-This message may appear a number of times in the file, once for every such
-unknown message identification.
-</p></dd><dt><a name="MSG_INVMSGID"></a><span class="term">MSG_INVMSGID line %1: invalid message identification '%2'</span></dt><dd><p>
-The concatenation of the prefix and the message identification is used as
-a symbol in the C++ module; as such it may only contain
-</p></dd><dt><a name="MSG_NOMSGID"></a><span class="term">MSG_NOMSGID line %1: message definition line found without a message ID</span></dt><dd><p>
-Message definition lines are lines starting with a "%". The rest of the line
-should comprise the message ID and text describing the message. This error
-indicates the message compiler found a line in the message file comprising
-just the "%" and nothing else.
-</p></dd><dt><a name="MSG_NOMSGTXT"></a><span class="term">MSG_NOMSGTXT line %1: line found containing a message ID ('%2') and no text</span></dt><dd><p>
-Message definition lines are lines starting with a "%". The rest of the line
-should comprise the message ID and text describing the message. This error
-is generated when a line is found in the message file that contains the
-leading "%" and the message identification but no text.
-</p></dd><dt><a name="MSG_NSEXTRARG"></a><span class="term">MSG_NSEXTRARG line %1: $NAMESPACE directive has too many arguments</span></dt><dd><p>
-The $NAMESPACE directive takes a single argument, a namespace in which all the
-generated symbol names are placed. This error is generated when the
-compiler finds a $NAMESPACE directive with more than one argument.
-</p></dd><dt><a name="MSG_NSINVARG"></a><span class="term">MSG_NSINVARG line %1: $NAMESPACE directive has an invalid argument ('%2')</span></dt><dd><p>
-The $NAMESPACE argument should be a valid C++ namespace. The reader does a
-cursory check on its validity, checking that the characters in the namespace
-are correct. The error is generated when the reader finds an invalid
-character. (Valid are alphanumeric characters, underscores and colons.)
-</p></dd><dt><a name="MSG_NSNOARG"></a><span class="term">MSG_NSNOARG line %1: no arguments were given to the $NAMESPACE directive</span></dt><dd><p>
-The $NAMESPACE directive takes a single argument, a namespace in which all the
-generated symbol names are placed. This error is generated when the
-compiler finds a $NAMESPACE directive with no arguments.
-</p></dd><dt><a name="MSG_OPENIN"></a><span class="term">MSG_OPENIN unable to open message file %1 for input: %2</span></dt><dd><p>
-The program was not able to open the specified input message file for the
-reason given.
-</p></dd><dt><a name="MSG_OPENOUT"></a><span class="term">MSG_OPENOUT unable to open %1 for output: %2</span></dt><dd><p>
-The program was not able to open the specified output file for the reason
-given.
-</p></dd><dt><a name="MSG_PRFEXTRARG"></a><span class="term">MSG_PRFEXTRARG line %1: $PREFIX directive has too many arguments</span></dt><dd><p>
-The $PREFIX directive takes a single argument, a prefix to be added to the
-symbol names when a C++ .h file is created. This error is generated when the
-compiler finds a $PREFIX directive with more than one argument.
-</p></dd><dt><a name="MSG_PRFINVARG"></a><span class="term">MSG_PRFINVARG line %1: $PREFIX directive has an invalid argument ('%2')</span></dt><dd><p>
-The $PREFIX argument is used in a symbol name in a C++ header file. As such,
-it must adhere to restrictions on C++ symbol names (e.g. may only contain
-alphanumeric characters or underscores, and may nor start with a digit).
-A $PREFIX directive was found with an argument (given in the message) that
-violates those restictions.
-</p></dd><dt><a name="MSG_RDLOCMES"></a><span class="term">MSG_RDLOCMES reading local message file %1</span></dt><dd><p>
-This is an informational message output by BIND10 when it starts to read a
-local message file. (A local message file may replace the text of one of more
-messages; the ID of the message will not be changed though.)
-</p></dd><dt><a name="MSG_READERR"></a><span class="term">MSG_READERR error reading from message file %1: %2</span></dt><dd><p>
+Note: the $PREFIX directive is deprecated and will be removed in a future
+version of BIND 10.
+</p></dd><dt><a name="LOG_READING_LOCAL_FILE"></a><span class="term">LOG_READING_LOCAL_FILE reading local message file %1</span></dt><dd><p>
+This is an informational message output by BIND 10 when it starts to read
+a local message file. (A local message file may replace the text of
+one or more messages; the ID of the message will not be changed though.)
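+</p><p>
+As a purely illustrative sketch (this example file is not part of BIND 10),
+a local message file might look like the following, with each line starting
+with a "%" followed by the message identification and the replacement text:
+</p><pre>
+% LOG_READ_ERROR error encountered reading message file %1: %2
+% RESOLVER_STARTED the resolver is up and running
+</pre><p>
+Only the text of the listed messages is replaced; as noted above, the
+message identification itself is not changed.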
+</p></dd><dt><a name="LOG_READ_ERROR"></a><span class="term">LOG_READ_ERROR error reading from message file %1: %2</span></dt><dd><p>
The specified error was encountered reading from the named message file.
-</p></dd><dt><a name="MSG_UNRECDIR"></a><span class="term">MSG_UNRECDIR line %1: unrecognised directive '%2'</span></dt><dd><p>
-A line starting with a dollar symbol was found, but the first word on the line
-(shown in the message) was not a recognised message compiler directive.
-</p></dd><dt><a name="MSG_WRITERR"></a><span class="term">MSG_WRITERR error writing to %1: %2</span></dt><dd><p>
-The specified error was encountered by the message compiler when writing to
-the named output file.
-</p></dd><dt><a name="NSAS_INVRESPSTR"></a><span class="term">NSAS_INVRESPSTR queried for %1 but got invalid response</span></dt><dd><p>
-This message indicates an internal error in the nameserver address store
-component (NSAS) of the resolver. The NSAS made a query for a RR for the
-specified nameserver but received an invalid response. Either the success
-function was called without a DNS message or the message was invalid on some
-way. (In the latter case, the error should have been picked up elsewhere in
-the processing logic, hence the raising of the error here.)
-</p></dd><dt><a name="NSAS_INVRESPTC"></a><span class="term">NSAS_INVRESPTC queried for %1 RR of type/class %2/%3, received response %4/%5</span></dt><dd><p>
-This message indicates an internal error in the nameserver address store
-component (NSAS) of the resolver. The NSAS made a query for the given RR
-type and class, but instead received an answer with the given type and class.
-</p></dd><dt><a name="NSAS_LOOKUPCANCEL"></a><span class="term">NSAS_LOOKUPCANCEL lookup for zone %1 has been cancelled</span></dt><dd><p>
-A debug message, this is output when a NSAS (nameserver address store -
-part of the resolver) lookup for a zone has been cancelled.
-</p></dd><dt><a name="NSAS_LOOKUPZONE"></a><span class="term">NSAS_LOOKUPZONE searching NSAS for nameservers for zone %1</span></dt><dd><p>
-A debug message, this is output when a call is made to the nameserver address
-store (part of the resolver) to obtain the nameservers for the specified zone.
-</p></dd><dt><a name="NSAS_NSADDR"></a><span class="term">NSAS_NSADDR asking resolver to obtain A and AAAA records for %1</span></dt><dd><p>
-A debug message, the NSAS (nameserver address store - part of the resolver) is
-making a callback into the resolver to retrieve the address records for the
-specified nameserver.
-</p></dd><dt><a name="NSAS_NSLKUPFAIL"></a><span class="term">NSAS_NSLKUPFAIL failed to lookup any %1 for %2</span></dt><dd><p>
-A debug message, the NSAS (nameserver address store - part of the resolver)
-has been unable to retrieve the specified resource record for the specified
-nameserver. This is not necessarily a problem - the nameserver may be
-unreachable, in which case the NSAS will try other nameservers in the zone.
-</p></dd><dt><a name="NSAS_NSLKUPSUCC"></a><span class="term">NSAS_NSLKUPSUCC found address %1 for %2</span></dt><dd><p>
-A debug message, the NSAS (nameserver address store - part of the resolver)
-has retrieved the given address for the specified nameserver through an
-external query.
-</p></dd><dt><a name="NSAS_SETRTT"></a><span class="term">NSAS_SETRTT reporting RTT for %1 as %2; new value is now %3</span></dt><dd><p>
+</p></dd><dt><a name="LOG_UNRECOGNISED_DIRECTIVE"></a><span class="term">LOG_UNRECOGNISED_DIRECTIVE line %1: unrecognised directive '%2'</span></dt><dd><p>
+Within a message file, a line starting with a dollar symbol was found
+(indicating the presence of a directive) but the first word on the line
+(shown in the message) was not recognised.
+</p></dd><dt><a name="LOG_WRITE_ERROR"></a><span class="term">LOG_WRITE_ERROR error writing to %1: %2</span></dt><dd><p>
+The specified error was encountered by the message compiler when writing
+to the named output file.
+</p></dd><dt><a name="NOTIFY_OUT_INVALID_ADDRESS"></a><span class="term">NOTIFY_OUT_INVALID_ADDRESS invalid address %1#%2: %3</span></dt><dd><p>
+The notify_out library tried to send a notify message to the given
+address, but it appears to be an invalid address. The configuration
+for secondary nameservers might contain a typographic error, or a
+different BIND 10 module has forgotten to validate its data before
+sending this module a notify command. As such, this should normally
+not happen, and points to an oversight in a different module.
+</p></dd><dt><a name="NOTIFY_OUT_REPLY_BAD_OPCODE"></a><span class="term">NOTIFY_OUT_REPLY_BAD_OPCODE bad opcode in notify reply from %1#%2: %3</span></dt><dd><p>
+The notify_out library sent a notify message to the nameserver at
+the given address, but the response did not have the opcode set to
+NOTIFY. The opcode in the response is printed. Since there was a
+response, no more notifies will be sent to this server for this
+notification event.
+</p></dd><dt><a name="NOTIFY_OUT_REPLY_BAD_QID"></a><span class="term">NOTIFY_OUT_REPLY_BAD_QID bad QID in notify reply from %1#%2: got %3, should be %4</span></dt><dd><p>
+The notify_out library sent a notify message to the nameserver at
+the given address, but the query id in the response does not match
+the one we sent. Since there was a response, no more notifies will
+be sent to this server for this notification event.
+</p></dd><dt><a name="NOTIFY_OUT_REPLY_BAD_QUERY_NAME"></a><span class="term">NOTIFY_OUT_REPLY_BAD_QUERY_NAME bad query name in notify reply from %1#%2: got %3, should be %4</span></dt><dd><p>
+The notify_out library sent a notify message to the nameserver at
+the given address, but the query name in the response does not match
+the one we sent. Since there was a response, no more notifies will
+be sent to this server for this notification event.
+</p></dd><dt><a name="NOTIFY_OUT_REPLY_QR_NOT_SET"></a><span class="term">NOTIFY_OUT_REPLY_QR_NOT_SET QR flags set to 0 in reply to notify from %1#%2</span></dt><dd><p>
+The notify_out library sent a notify message to the nameserver at the
+given address, but the reply did not have the QR bit set to one.
+Since there was a response, no more notifies will be sent to this
+server for this notification event.
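+</p><p>
+To illustrate how these reply checks relate to each other, the following
+sketch (not the real notify_out code; the 'reply' object and its attribute
+names are hypothetical) accepts a reply only if all of the conditions
+described in the messages above hold:
+</p><pre>
+def reply_is_sane(reply, sent_qid, sent_qname):
+    """Return True if the notify reply passes the checks described above."""
+    if reply.opcode != "NOTIFY":   # NOTIFY_OUT_REPLY_BAD_OPCODE
+        return False
+    if reply.qid != sent_qid:      # NOTIFY_OUT_REPLY_BAD_QID
+        return False
+    if reply.qname != sent_qname:  # NOTIFY_OUT_REPLY_BAD_QUERY_NAME
+        return False
+    if not reply.qr:               # NOTIFY_OUT_REPLY_QR_NOT_SET
+        return False
+    return True
+</pre><p>
+Whichever check fails, the effect described above is the same: since a
+response was received, no more notifies are sent to that server for this
+notification event.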
+</p></dd><dt><a name="NOTIFY_OUT_REPLY_UNCAUGHT_EXCEPTION"></a><span class="term">NOTIFY_OUT_REPLY_UNCAUGHT_EXCEPTION uncaught exception: %1</span></dt><dd><p>
+There was an uncaught exception in the handling of a notify reply
+message, either in the message parser, or while trying to extract data
+from the parsed message. The error is printed, and notify_out will
+treat the response as a bad message, but this does point to a
+programming error, since all exceptions should have been caught
+explicitly. Please file a bug report. Since there was a response,
+no more notifies will be sent to this server for this notification
+event.
+</p></dd><dt><a name="NOTIFY_OUT_RETRY_EXCEEDED"></a><span class="term">NOTIFY_OUT_RETRY_EXCEEDED notify to %1#%2: number of retries (%3) exceeded</span></dt><dd><p>
+The maximum number of retries for the notify target has been exceeded.
+Either the address of the secondary nameserver is wrong, or it is not
+responding.
+</p></dd><dt><a name="NOTIFY_OUT_SENDING_NOTIFY"></a><span class="term">NOTIFY_OUT_SENDING_NOTIFY sending notify to %1#%2</span></dt><dd><p>
+A notify message is sent to the secondary nameserver at the given
+address.
+</p></dd><dt><a name="NOTIFY_OUT_SOCKET_ERROR"></a><span class="term">NOTIFY_OUT_SOCKET_ERROR socket error sending notify to %1#%2: %3</span></dt><dd><p>
+There was a network error while trying to send a notify message to
+the given address. The address might be unreachable. The socket
+error is printed and should provide more information.
+</p></dd><dt><a name="NOTIFY_OUT_SOCKET_RECV_ERROR"></a><span class="term">NOTIFY_OUT_SOCKET_RECV_ERROR socket error reading notify reply from %1#%2: %3</span></dt><dd><p>
+There was a network error while trying to read a notify reply
+message from the given address. The socket error is printed and should
+provide more information.
+</p></dd><dt><a name="NOTIFY_OUT_TIMEOUT"></a><span class="term">NOTIFY_OUT_TIMEOUT retry notify to %1#%2</span></dt><dd><p>
+The notify message to the given address (noted as address#port) has
+timed out, and the message will be resent until the max retry limit
+is reached.
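+</p><p>
+As a rough sketch only (not the actual notify_out implementation; the
+timeout and retry values below are assumptions for the example), the
+timeout/retry behaviour described above amounts to:
+</p><pre>
+import socket
+
+MAX_RETRIES = 5        # assumed value, for illustration only
+TIMEOUT_SECONDS = 15   # assumed value, for illustration only
+
+def send_notify_with_retries(sock, notify_packet, addr):
+    """Resend the notify until a reply arrives or the retry limit is hit."""
+    sock.settimeout(TIMEOUT_SECONDS)
+    for _attempt in range(MAX_RETRIES + 1):
+        sock.sendto(notify_packet, addr)        # NOTIFY_OUT_SENDING_NOTIFY
+        try:
+            return sock.recvfrom(4096)          # a reply was received
+        except socket.timeout:
+            continue                            # NOTIFY_OUT_TIMEOUT: retry
+    raise RuntimeError("notify retry limit exceeded")  # NOTIFY_OUT_RETRY_EXCEEDED
+</pre><p>
+This is only meant to show the relationship between the timeout and the
+retry limit; the real module also performs the reply checks described above.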
+</p></dd><dt><a name="NSAS_FIND_NS_ADDRESS"></a><span class="term">NSAS_FIND_NS_ADDRESS asking resolver to obtain A and AAAA records for %1</span></dt><dd><p>
+A debug message issued when the NSAS (nameserver address store - part
+of the resolver) is making a callback into the resolver to retrieve the
+address records for the specified nameserver.
+</p></dd><dt><a name="NSAS_FOUND_ADDRESS"></a><span class="term">NSAS_FOUND_ADDRESS found address %1 for %2</span></dt><dd><p>
+A debug message issued when the NSAS (nameserver address store - part
+of the resolver) has retrieved the given address for the specified
+nameserver through an external query.
+</p></dd><dt><a name="NSAS_INVALID_RESPONSE"></a><span class="term">NSAS_INVALID_RESPONSE queried for %1 but got invalid response</span></dt><dd><p>
+The NSAS (nameserver address store - part of the resolver) made a query
+for a RR for the specified nameserver but received an invalid response.
+Either the success function was called without a DNS message or the
+message was invalid in some way. (In the latter case, the error should
+have been picked up elsewhere in the processing logic, hence the raising
+of the error here.)
+</p><p>
+This message indicates an internal error in the NSAS. Please raise a
+bug report.
+</p></dd><dt><a name="NSAS_LOOKUP_CANCEL"></a><span class="term">NSAS_LOOKUP_CANCEL lookup for zone %1 has been canceled</span></dt><dd><p>
+A debug message issued when an NSAS (nameserver address store - part of
+the resolver) lookup for a zone has been canceled.
+</p></dd><dt><a name="NSAS_NS_LOOKUP_FAIL"></a><span class="term">NSAS_NS_LOOKUP_FAIL failed to lookup any %1 for %2</span></dt><dd><p>
+A debug message issued when the NSAS (nameserver address store - part of
+the resolver) has been unable to retrieve the specified resource record
+for the specified nameserver. This is not necessarily a problem - the
+nameserver may be unreachable, in which case the NSAS will try other
+nameservers in the zone.
+</p></dd><dt><a name="NSAS_SEARCH_ZONE_NS"></a><span class="term">NSAS_SEARCH_ZONE_NS searching NSAS for nameservers for zone %1</span></dt><dd><p>
+A debug message output when a call is made to the NSAS (nameserver
+address store - part of the resolver) to obtain the nameservers for
+the specified zone.
+</p></dd><dt><a name="NSAS_UPDATE_RTT"></a><span class="term">NSAS_UPDATE_RTT update RTT for %1: was %2 ms, is now %3 ms</span></dt><dd><p>
A NSAS (nameserver address store - part of the resolver) debug message
-reporting the round-trip time (RTT) for a query made to the specified
-nameserver. The RTT has been updated using the value given and the new RTT is
-displayed. (The RTT is subject to a calculation that damps out sudden
-changes. As a result, the new RTT is not necessarily equal to the RTT
-reported.)
+reporting the update of a round-trip time (RTT) for a query made to the
+specified nameserver. The RTT has been updated using the value given
+and the new RTT is displayed. (The RTT is subject to a calculation that
+damps out sudden changes. As a result, the new RTT used by the NSAS in
+future decisions of which nameserver to use is not necessarily equal to
+the RTT reported.)
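+</p><p>
+The exact damping calculation is not described here; as a hedged
+illustration only, a common approach is an exponentially weighted average
+of the old and newly measured values (the weight below is an assumption):
+</p><pre>
+OLD_WEIGHT = 0.7   # assumed weight: how strongly the old RTT dominates
+
+def damped_rtt(old_rtt_ms, measured_rtt_ms):
+    """Blend the previous RTT with the newly measured one."""
+    return OLD_WEIGHT * old_rtt_ms + (1.0 - OLD_WEIGHT) * measured_rtt_ms
+</pre><p>
+With such a scheme a single slow (or fast) response moves the stored RTT
+only part of the way towards the measured value, which is why the new RTT
+is not necessarily equal to the RTT reported in the message.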
+</p></dd><dt><a name="NSAS_WRONG_ANSWER"></a><span class="term">NSAS_WRONG_ANSWER queried for %1 RR of type/class %2/%3, received response %4/%5</span></dt><dd><p>
+A NSAS (nameserver address store - part of the resolver) made a query for
+a resource record of a particular type and class, but instead received
+an answer with a different type and class (the type and class received
+are given in the message).
+</p><p>
+This message indicates an internal error in the NSAS. Please raise a
+bug report.
</p></dd><dt><a name="RESLIB_ANSWER"></a><span class="term">RESLIB_ANSWER answer received in response to query for <%1></span></dt><dd><p>
A debug message recording that an answer has been received to an upstream
query for the specified question. Previous debug messages will have indicated
@@ -599,95 +1372,95 @@ the server to which the question was sent.
</p></dd><dt><a name="RESLIB_DEEPEST"></a><span class="term">RESLIB_DEEPEST did not find <%1> in cache, deepest delegation found is %2</span></dt><dd><p>
A debug message, a cache lookup did not find the specified <name, class,
type> tuple in the cache; instead, the deepest delegation found is indicated.
-</p></dd><dt><a name="RESLIB_FOLLOWCNAME"></a><span class="term">RESLIB_FOLLOWCNAME following CNAME chain to <%1></span></dt><dd><p>
+</p></dd><dt><a name="RESLIB_FOLLOW_CNAME"></a><span class="term">RESLIB_FOLLOW_CNAME following CNAME chain to <%1></span></dt><dd><p>
A debug message, a CNAME response was received and another query is being issued
for the <name, class, type> tuple.
-</p></dd><dt><a name="RESLIB_LONGCHAIN"></a><span class="term">RESLIB_LONGCHAIN CNAME received in response to query for <%1>: CNAME chain length exceeded</span></dt><dd><p>
+</p></dd><dt><a name="RESLIB_LONG_CHAIN"></a><span class="term">RESLIB_LONG_CHAIN CNAME received in response to query for <%1>: CNAME chain length exceeded</span></dt><dd><p>
A debug message recording that a CNAME response has been received to an upstream
query for the specified question (Previous debug messages will have indicated
the server to which the question was sent). However, receipt of this CNAME
has meant that the resolver has exceeded the CNAME chain limit (a CNAME chain
is where one CNAME points to another) and so an error is being returned.
-</p></dd><dt><a name="RESLIB_NONSRRSET"></a><span class="term">RESLIB_NONSRRSET no NS RRSet in referral response received to query for <%1></span></dt><dd><p>
+</p></dd><dt><a name="RESLIB_NO_NS_RRSET"></a><span class="term">RESLIB_NO_NS_RRSET no NS RRSet in referral response received to query for <%1></span></dt><dd><p>
A debug message, this indicates that a response was received for the specified
-query and was categorised as a referral. However, the received message did
+query and was categorized as a referral. However, the received message did
not contain any NS RRsets. This may indicate a programming error in the
response classification code.
-</p></dd><dt><a name="RESLIB_NSASLOOK"></a><span class="term">RESLIB_NSASLOOK looking up nameserver for zone %1 in the NSAS</span></dt><dd><p>
+</p></dd><dt><a name="RESLIB_NSAS_LOOKUP"></a><span class="term">RESLIB_NSAS_LOOKUP looking up nameserver for zone %1 in the NSAS</span></dt><dd><p>
A debug message, the RunningQuery object is querying the NSAS for the
nameservers for the specified zone.
-</p></dd><dt><a name="RESLIB_NXDOMRR"></a><span class="term">RESLIB_NXDOMRR NXDOMAIN/NXRRSET received in response to query for <%1></span></dt><dd><p>
+</p></dd><dt><a name="RESLIB_NXDOM_NXRR"></a><span class="term">RESLIB_NXDOM_NXRR NXDOMAIN/NXRRSET received in response to query for <%1></span></dt><dd><p>
A debug message recording that either a NXDOMAIN or an NXRRSET response has
been received to an upstream query for the specified question. Previous debug
messages will have indicated the server to which the question was sent.
</p></dd><dt><a name="RESLIB_PROTOCOL"></a><span class="term">RESLIB_PROTOCOL protocol error in answer for %1: %3</span></dt><dd><p>
A debug message indicating that a protocol error was received. As there
are no retries left, an error will be reported.
-</p></dd><dt><a name="RESLIB_PROTOCOLRTRY"></a><span class="term">RESLIB_PROTOCOLRTRY protocol error in answer for %1: %2 (retries left: %3)</span></dt><dd><p>
+</p></dd><dt><a name="RESLIB_PROTOCOL_RETRY"></a><span class="term">RESLIB_PROTOCOL_RETRY protocol error in answer for %1: %2 (retries left: %3)</span></dt><dd><p>
A debug message indicating that a protocol error was received and that
the resolver is repeating the query to the same nameserver. After this
repeated query, there will be the indicated number of retries left.
-</p></dd><dt><a name="RESLIB_RCODERR"></a><span class="term">RESLIB_RCODERR RCODE indicates error in response to query for <%1></span></dt><dd><p>
+</p></dd><dt><a name="RESLIB_RCODE_ERR"></a><span class="term">RESLIB_RCODE_ERR RCODE indicates error in response to query for <%1></span></dt><dd><p>
A debug message, the response to the specified query indicated an error
that is not covered by a specific code path. A SERVFAIL will be returned.
-</p></dd><dt><a name="RESLIB_REFERRAL"></a><span class="term">RESLIB_REFERRAL referral received in response to query for <%1></span></dt><dd><p>
-A debug message recording that a referral response has been received to an
-upstream query for the specified question. Previous debug messages will
-have indicated the server to which the question was sent.
-</p></dd><dt><a name="RESLIB_REFERZONE"></a><span class="term">RESLIB_REFERZONE referred to zone %1</span></dt><dd><p>
-A debug message indicating that the last referral message was to the specified
-zone.
-</p></dd><dt><a name="RESLIB_RESCAFND"></a><span class="term">RESLIB_RESCAFND found <%1> in the cache (resolve() instance %2)</span></dt><dd><p>
+</p></dd><dt><a name="RESLIB_RECQ_CACHE_FIND"></a><span class="term">RESLIB_RECQ_CACHE_FIND found <%1> in the cache (resolve() instance %2)</span></dt><dd><p>
This is a debug message and indicates that a RecursiveQuery object found
the specified <name, class, type> tuple in the cache. The instance number
at the end of the message indicates which of the two resolve() methods has
been called.
-</p></dd><dt><a name="RESLIB_RESCANOTFND"></a><span class="term">RESLIB_RESCANOTFND did not find <%1> in the cache, starting RunningQuery (resolve() instance %2)</span></dt><dd><p>
+</p></dd><dt><a name="RESLIB_RECQ_CACHE_NO_FIND"></a><span class="term">RESLIB_RECQ_CACHE_NO_FIND did not find <%1> in the cache, starting RunningQuery (resolve() instance %2)</span></dt><dd><p>
This is a debug message and indicates that the lookup in the cache made by the
RecursiveQuery::resolve() method did not find an answer, so a new RunningQuery
object has been created to resolve the question. The instance number at
the end of the message indicates which of the two resolve() methods has
been called.
+</p></dd><dt><a name="RESLIB_REFERRAL"></a><span class="term">RESLIB_REFERRAL referral received in response to query for <%1></span></dt><dd><p>
+A debug message recording that a referral response has been received to an
+upstream query for the specified question. Previous debug messages will
+have indicated the server to which the question was sent.
+</p></dd><dt><a name="RESLIB_REFER_ZONE"></a><span class="term">RESLIB_REFER_ZONE referred to zone %1</span></dt><dd><p>
+A debug message indicating that the last referral message was to the specified
+zone.
</p></dd><dt><a name="RESLIB_RESOLVE"></a><span class="term">RESLIB_RESOLVE asked to resolve <%1> (resolve() instance %2)</span></dt><dd><p>
A debug message, the RecursiveQuery::resolve method has been called to resolve
the specified <name, class, type> tuple. The first action will be to look up
the specified tuple in the cache. The instance number at the end of the
message indicates which of the two resolve() methods has been called.
-</p></dd><dt><a name="RESLIB_RRSETFND"></a><span class="term">RESLIB_RRSETFND found single RRset in the cache when querying for <%1> (resolve() instance %2)</span></dt><dd><p>
+</p></dd><dt><a name="RESLIB_RRSET_FOUND"></a><span class="term">RESLIB_RRSET_FOUND found single RRset in the cache when querying for <%1> (resolve() instance %2)</span></dt><dd><p>
A debug message, indicating that when RecursiveQuery::resolve queried the
cache, a single RRset was found which was put in the answer. The instance
number at the end of the message indicates which of the two resolve()
methods has been called.
</p></dd><dt><a name="RESLIB_RTT"></a><span class="term">RESLIB_RTT round-trip time of last query calculated as %1 ms</span></dt><dd><p>
A debug message giving the round-trip time of the last query and response.
-</p></dd><dt><a name="RESLIB_RUNCAFND"></a><span class="term">RESLIB_RUNCAFND found <%1> in the cache</span></dt><dd><p>
+</p></dd><dt><a name="RESLIB_RUNQ_CACHE_FIND"></a><span class="term">RESLIB_RUNQ_CACHE_FIND found <%1> in the cache</span></dt><dd><p>
This is a debug message and indicates that a RunningQuery object found
the specified <name, class, type> tuple in the cache.
-</p></dd><dt><a name="RESLIB_RUNCALOOK"></a><span class="term">RESLIB_RUNCALOOK looking up up <%1> in the cache</span></dt><dd><p>
+</p></dd><dt><a name="RESLIB_RUNQ_CACHE_LOOKUP"></a><span class="term">RESLIB_RUNQ_CACHE_LOOKUP looking up up <%1> in the cache</span></dt><dd><p>
This is a debug message and indicates that a RunningQuery object has made
a call to its doLookup() method to look up the specified <name, class, type>
tuple, the first action of which will be to examine the cache.
-</p></dd><dt><a name="RESLIB_RUNQUFAIL"></a><span class="term">RESLIB_RUNQUFAIL failure callback - nameservers are unreachable</span></dt><dd><p>
+</p></dd><dt><a name="RESLIB_RUNQ_FAIL"></a><span class="term">RESLIB_RUNQ_FAIL failure callback - nameservers are unreachable</span></dt><dd><p>
A debug message indicating that a RunningQuery's failure callback has been
called because all nameservers for the zone in question are unreachable.
-</p></dd><dt><a name="RESLIB_RUNQUSUCC"></a><span class="term">RESLIB_RUNQUSUCC success callback - sending query to %1</span></dt><dd><p>
+</p></dd><dt><a name="RESLIB_RUNQ_SUCCESS"></a><span class="term">RESLIB_RUNQ_SUCCESS success callback - sending query to %1</span></dt><dd><p>
A debug message indicating that a RunningQuery's success callback has been
called because a nameserver has been found, and that a query is being sent
to the specified nameserver.
-</p></dd><dt><a name="RESLIB_TESTSERV"></a><span class="term">RESLIB_TESTSERV setting test server to %1(%2)</span></dt><dd><p>
-This is an internal debugging message and is only generated in unit tests.
-It indicates that all upstream queries from the resolver are being routed to
-the specified server, regardless of the address of the nameserver to which
-the query would normally be routed. As it should never be seen in normal
-operation, it is a warning message instead of a debug message.
-</p></dd><dt><a name="RESLIB_TESTUPSTR"></a><span class="term">RESLIB_TESTUPSTR sending upstream query for <%1> to test server at %2</span></dt><dd><p>
+</p></dd><dt><a name="RESLIB_TEST_SERVER"></a><span class="term">RESLIB_TEST_SERVER setting test server to %1(%2)</span></dt><dd><p>
+This is a warning message only generated in unit tests. It indicates
+that all upstream queries from the resolver are being routed to the
+specified server, regardless of the address of the nameserver to which
+the query would normally be routed. If seen during normal operation,
+please submit a bug report.
+</p></dd><dt><a name="RESLIB_TEST_UPSTREAM"></a><span class="term">RESLIB_TEST_UPSTREAM sending upstream query for <%1> to test server at %2</span></dt><dd><p>
This is a debug message and should only be seen in unit tests. A query for
the specified <name, class, type> tuple is being sent to a test nameserver
whose address is given in the message.
</p></dd><dt><a name="RESLIB_TIMEOUT"></a><span class="term">RESLIB_TIMEOUT query <%1> to %2 timed out</span></dt><dd><p>
-A debug message indicating that the specified query has timed out and as
-there are no retries left, an error will be reported.
-</p></dd><dt><a name="RESLIB_TIMEOUTRTRY"></a><span class="term">RESLIB_TIMEOUTRTRY query <%1> to %2 timed out, re-trying (retries left: %3)</span></dt><dd><p>
+A debug message indicating that the specified upstream query has timed out and
+there are no retries left.
+</p></dd><dt><a name="RESLIB_TIMEOUT_RETRY"></a><span class="term">RESLIB_TIMEOUT_RETRY query <%1> to %2 timed out, re-trying (retries left: %3)</span></dt><dd><p>
A debug message indicating that the specified query has timed out and that
the resolver is repeating the query to the same nameserver. After this
repeated query, there will be the indicated number of retries left.
@@ -699,143 +1472,610 @@ gives no cause for concern.
</p></dd><dt><a name="RESLIB_UPSTREAM"></a><span class="term">RESLIB_UPSTREAM sending upstream query for <%1> to %2</span></dt><dd><p>
A debug message indicating that a query for the specified <name, class, type>
tuple is being sent to a nameserver whose address is given in the message.
-</p></dd><dt><a name="RESOLVER_AXFRTCP"></a><span class="term">RESOLVER_AXFRTCP AXFR request received over TCP</span></dt><dd><p>
-A debug message, the resolver received a NOTIFY message over TCP. The server
-cannot process it and will return an error message to the sender with the
-RCODE set to NOTIMP.
-</p></dd><dt><a name="RESOLVER_AXFRUDP"></a><span class="term">RESOLVER_AXFRUDP AXFR request received over UDP</span></dt><dd><p>
-A debug message, the resolver received a NOTIFY message over UDP. The server
-cannot process it (and in any case, an AXFR request should be sent over TCP)
-and will return an error message to the sender with the RCODE set to FORMERR.
-</p></dd><dt><a name="RESOLVER_CLTMOSMALL"></a><span class="term">RESOLVER_CLTMOSMALL client timeout of %1 is too small</span></dt><dd><p>
-An error indicating that the configuration value specified for the query
-timeout is too small.
-</p></dd><dt><a name="RESOLVER_CONFIGCHAN"></a><span class="term">RESOLVER_CONFIGCHAN configuration channel created</span></dt><dd><p>
-A debug message, output when the resolver has successfully established a
-connection to the configuration channel.
-</p></dd><dt><a name="RESOLVER_CONFIGERR"></a><span class="term">RESOLVER_CONFIGERR error in configuration: %1</span></dt><dd><p>
-An error was detected in a configuration update received by the resolver. This
-may be in the format of the configuration message (in which case this is a
-programming error) or it may be in the data supplied (in which case it is
-a user error). The reason for the error, given as a parameter in the message,
-will give more details.
-</p></dd><dt><a name="RESOLVER_CONFIGLOAD"></a><span class="term">RESOLVER_CONFIGLOAD configuration loaded</span></dt><dd><p>
-A debug message, output when the resolver configuration has been successfully
-loaded.
-</p></dd><dt><a name="RESOLVER_CONFIGUPD"></a><span class="term">RESOLVER_CONFIGUPD configuration updated: %1</span></dt><dd><p>
-A debug message, the configuration has been updated with the specified
-information.
+</p></dd><dt><a name="RESOLVER_AXFR_TCP"></a><span class="term">RESOLVER_AXFR_TCP AXFR request received over TCP</span></dt><dd><p>
+This is a debug message output when the resolver received a request for
+an AXFR (full transfer of a zone) over TCP. Only authoritative servers
+are able to handle AXFR requests, so the resolver will return an error
+message to the sender with the RCODE set to NOTIMP.
+</p></dd><dt><a name="RESOLVER_AXFR_UDP"></a><span class="term">RESOLVER_AXFR_UDP AXFR request received over UDP</span></dt><dd><p>
+This is a debug message output when the resolver received a request for
+an AXFR (full transfer of a zone) over UDP. Only authoritative servers
+are able to handle AXFR requests (and in any case, an AXFR request should
+be sent over TCP), so the resolver will return an error message to the
+sender with the RCODE set to NOTIMP.
+</p></dd><dt><a name="RESOLVER_CLIENT_TIME_SMALL"></a><span class="term">RESOLVER_CLIENT_TIME_SMALL client timeout of %1 is too small</span></dt><dd><p>
+During the update of the resolver's configuration parameters, the value
+of the client timeout was found to be too small. The configuration
+update was abandoned and the parameters were not changed.
+</p></dd><dt><a name="RESOLVER_CONFIG_CHANNEL"></a><span class="term">RESOLVER_CONFIG_CHANNEL configuration channel created</span></dt><dd><p>
+This is a debug message output when the resolver has successfully
+established a connection to the configuration channel.
+</p></dd><dt><a name="RESOLVER_CONFIG_ERROR"></a><span class="term">RESOLVER_CONFIG_ERROR error in configuration: %1</span></dt><dd><p>
+An error was detected in a configuration update received by the
+resolver. This may be in the format of the configuration message (in
+which case this is a programming error) or it may be in the data supplied
+(in which case it is a user error). The reason for the error, included
+in the message, will give more details. The configuration update is
+not applied and the resolver parameters are not changed.
+</p></dd><dt><a name="RESOLVER_CONFIG_LOADED"></a><span class="term">RESOLVER_CONFIG_LOADED configuration loaded</span></dt><dd><p>
+This is a debug message output when the resolver configuration has been
+successfully loaded.
+</p></dd><dt><a name="RESOLVER_CONFIG_UPDATED"></a><span class="term">RESOLVER_CONFIG_UPDATED configuration updated: %1</span></dt><dd><p>
+This is a debug message output when the resolver configuration is being
+updated with the specified information.
</p></dd><dt><a name="RESOLVER_CREATED"></a><span class="term">RESOLVER_CREATED main resolver object created</span></dt><dd><p>
-A debug message, output when the Resolver() object has been created.
-</p></dd><dt><a name="RESOLVER_DNSMSGRCVD"></a><span class="term">RESOLVER_DNSMSGRCVD DNS message received: %1</span></dt><dd><p>
-A debug message, this always precedes some other logging message and is the
-formatted contents of the DNS packet that the other message refers to.
-</p></dd><dt><a name="RESOLVER_DNSMSGSENT"></a><span class="term">RESOLVER_DNSMSGSENT DNS message of %1 bytes sent: %2</span></dt><dd><p>
-A debug message, this contains details of the response sent back to the querying
-system.
+This is a debug message indicating that the main resolver object has
+been created.
+</p></dd><dt><a name="RESOLVER_DNS_MESSAGE_RECEIVED"></a><span class="term">RESOLVER_DNS_MESSAGE_RECEIVED DNS message received: %1</span></dt><dd><p>
+This is a debug message from the resolver listing the contents of a
+received DNS message.
+</p></dd><dt><a name="RESOLVER_DNS_MESSAGE_SENT"></a><span class="term">RESOLVER_DNS_MESSAGE_SENT DNS message of %1 bytes sent: %2</span></dt><dd><p>
+This is a debug message containing details of the response returned by
+the resolver to the querying system.
</p></dd><dt><a name="RESOLVER_FAILED"></a><span class="term">RESOLVER_FAILED resolver failed, reason: %1</span></dt><dd><p>
-This is an error message output when an unhandled exception is caught by the
-resolver. All it can do is to shut down.
-</p></dd><dt><a name="RESOLVER_FWDADDR"></a><span class="term">RESOLVER_FWDADDR setting forward address %1(%2)</span></dt><dd><p>
-This message may appear multiple times during startup, and it lists the
-forward addresses used by the resolver when running in forwarding mode.
-</p></dd><dt><a name="RESOLVER_FWDQUERY"></a><span class="term">RESOLVER_FWDQUERY processing forward query</span></dt><dd><p>
-The received query has passed all checks and is being forwarded to upstream
+This is an error message output when an unhandled exception is caught
+by the resolver. After this, the resolver will shut itself down.
+Please submit a bug report.
+</p></dd><dt><a name="RESOLVER_FORWARD_ADDRESS"></a><span class="term">RESOLVER_FORWARD_ADDRESS setting forward address %1(%2)</span></dt><dd><p>
+If the resolver is running in forward mode, this message will appear
+during startup to list the forward address. If multiple addresses are
+specified, it will appear once for each address.
+</p></dd><dt><a name="RESOLVER_FORWARD_QUERY"></a><span class="term">RESOLVER_FORWARD_QUERY processing forward query</span></dt><dd><p>
+This is a debug message indicating that a query received by the resolver
+has passed a set of checks (message is well-formed, it is allowed by the
+ACL, it is a supported opcode, etc.) and is being forwarded to upstream
servers.
-</p></dd><dt><a name="RESOLVER_HDRERR"></a><span class="term">RESOLVER_HDRERR message received, exception when processing header: %1</span></dt><dd><p>
-A debug message noting that an exception occurred during the processing of
-a received packet. The packet has been dropped.
+</p></dd><dt><a name="RESOLVER_HEADER_ERROR"></a><span class="term">RESOLVER_HEADER_ERROR message received, exception when processing header: %1</span></dt><dd><p>
+This is a debug message from the resolver noting that an exception
+occurred during the processing of a received packet. The packet has
+been dropped.
</p></dd><dt><a name="RESOLVER_IXFR"></a><span class="term">RESOLVER_IXFR IXFR request received</span></dt><dd><p>
-The resolver received a NOTIFY message over TCP. The server cannot process it
-and will return an error message to the sender with the RCODE set to NOTIMP.
-</p></dd><dt><a name="RESOLVER_LKTMOSMALL"></a><span class="term">RESOLVER_LKTMOSMALL lookup timeout of %1 is too small</span></dt><dd><p>
-An error indicating that the configuration value specified for the lookup
-timeout is too small.
-</p></dd><dt><a name="RESOLVER_NFYNOTAUTH"></a><span class="term">RESOLVER_NFYNOTAUTH NOTIFY arrived but server is not authoritative</span></dt><dd><p>
-The resolver received a NOTIFY message. As the server is not authoritative it
-cannot process it, so it returns an error message to the sender with the RCODE
-set to NOTAUTH.
-</p></dd><dt><a name="RESOLVER_NORMQUERY"></a><span class="term">RESOLVER_NORMQUERY processing normal query</span></dt><dd><p>
-The received query has passed all checks and is being processed by the resolver.
-</p></dd><dt><a name="RESOLVER_NOROOTADDR"></a><span class="term">RESOLVER_NOROOTADDR no root addresses available</span></dt><dd><p>
-A warning message during startup, indicates that no root addresses have been
-set. This may be because the resolver will get them from a priming query.
-</p></dd><dt><a name="RESOLVER_NOTIN"></a><span class="term">RESOLVER_NOTIN non-IN class request received, returning REFUSED message</span></dt><dd><p>
-A debug message, the resolver has received a DNS packet that was not IN class.
-The resolver cannot handle such packets, so is returning a REFUSED response to
-the sender.
-</p></dd><dt><a name="RESOLVER_NOTONEQUES"></a><span class="term">RESOLVER_NOTONEQUES query contained %1 questions, exactly one question was expected</span></dt><dd><p>
-A debug message, the resolver received a query that contained the number of
-entires in the question section detailed in the message. This is a malformed
-message, as a DNS query must contain only one question. The resolver will
-return a message to the sender with the RCODE set to FORMERR.
-</p></dd><dt><a name="RESOLVER_OPCODEUNS"></a><span class="term">RESOLVER_OPCODEUNS opcode %1 not supported by the resolver</span></dt><dd><p>
-A debug message, the resolver received a message with an unsupported opcode
-(it can only process QUERY opcodes). It will return a message to the sender
-with the RCODE set to NOTIMP.
-</p></dd><dt><a name="RESOLVER_PARSEERR"></a><span class="term">RESOLVER_PARSEERR error parsing received message: %1 - returning %2</span></dt><dd><p>
-A debug message noting that the resolver received a message and the parsing
-of the body of the message failed due to some non-protocol related reason
-(although the parsing of the header succeeded). The message parameters give
-a textual description of the problem and the RCODE returned.
-</p></dd><dt><a name="RESOLVER_PRINTMSG"></a><span class="term">RESOLVER_PRINTMSG print message command, aeguments are: %1</span></dt><dd><p>
-This message is logged when a "print_message" command is received over the
-command channel.
-</p></dd><dt><a name="RESOLVER_PROTERR"></a><span class="term">RESOLVER_PROTERR protocol error parsing received message: %1 - returning %2</span></dt><dd><p>
-A debug message noting that the resolver received a message and the parsing
-of the body of the message failed due to some protocol error (although the
-parsing of the header succeeded). The message parameters give a textual
-description of the problem and the RCODE returned.
-</p></dd><dt><a name="RESOLVER_QUSETUP"></a><span class="term">RESOLVER_QUSETUP query setup</span></dt><dd><p>
-A debug message noting that the resolver is creating a RecursiveQuery object.
-</p></dd><dt><a name="RESOLVER_QUSHUT"></a><span class="term">RESOLVER_QUSHUT query shutdown</span></dt><dd><p>
-A debug message noting that the resolver is destroying a RecursiveQuery object.
-</p></dd><dt><a name="RESOLVER_QUTMOSMALL"></a><span class="term">RESOLVER_QUTMOSMALL query timeout of %1 is too small</span></dt><dd><p>
-An error indicating that the configuration value specified for the query
-timeout is too small.
+This is a debug message indicating that the resolver received a request
+for an IXFR (incremental transfer of a zone). Only authoritative servers
+are able to handle IXFR requests, so the resolver will return an error
+message to the sender with the RCODE set to NOTIMP.
+</p></dd><dt><a name="RESOLVER_LOOKUP_TIME_SMALL"></a><span class="term">RESOLVER_LOOKUP_TIME_SMALL lookup timeout of %1 is too small</span></dt><dd><p>
+During the update of the resolver's configuration parameters, the value
+of the lookup timeout was found to be too small. The configuration
+update will not be applied.
+</p></dd><dt><a name="RESOLVER_MESSAGE_ERROR"></a><span class="term">RESOLVER_MESSAGE_ERROR error parsing received message: %1 - returning %2</span></dt><dd><p>
+This is a debug message noting that parsing of the body of a received
+message by the resolver failed due to some error (although the parsing of
+the header succeeded). The message parameters give a textual description
+of the problem and the RCODE returned.
+</p></dd><dt><a name="RESOLVER_NEGATIVE_RETRIES"></a><span class="term">RESOLVER_NEGATIVE_RETRIES negative number of retries (%1) specified in the configuration</span></dt><dd><p>
+This error is issued when a resolver configuration update has specified
+a negative retry count: only zero or positive values are valid. The
+configuration update was abandoned and the parameters were not changed.
+</p></dd><dt><a name="RESOLVER_NON_IN_PACKET"></a><span class="term">RESOLVER_NON_IN_PACKET non-IN class request received, returning REFUSED message</span></dt><dd><p>
+This debug message is issued when the resolver has received a DNS packet that
+was not IN (Internet) class. The resolver cannot handle such packets,
+so is returning a REFUSED response to the sender.
+</p></dd><dt><a name="RESOLVER_NORMAL_QUERY"></a><span class="term">RESOLVER_NORMAL_QUERY processing normal query</span></dt><dd><p>
+This is a debug message indicating that the query received by the resolver
+has passed a set of checks (message is well-formed, it is allowed by the
+ACL, it is a supported opcode, etc.) and is being processed by the resolver.
+</p></dd><dt><a name="RESOLVER_NOTIFY_RECEIVED"></a><span class="term">RESOLVER_NOTIFY_RECEIVED NOTIFY arrived but server is not authoritative</span></dt><dd><p>
+The resolver has received a NOTIFY message. As the server is not
+authoritative it cannot process it, so it returns an error message to
+the sender with the RCODE set to NOTAUTH.
+</p></dd><dt><a name="RESOLVER_NOT_ONE_QUESTION"></a><span class="term">RESOLVER_NOT_ONE_QUESTION query contained %1 questions, exactly one question was expected</span></dt><dd><p>
+This debug message indicates that the resolver received a query that
+contained the number of entries in the question section detailed in
+the message. This is a malformed message, as a DNS query must contain
+only one question. The resolver will return a message to the sender
+with the RCODE set to FORMERR.
+</p></dd><dt><a name="RESOLVER_NO_ROOT_ADDRESS"></a><span class="term">RESOLVER_NO_ROOT_ADDRESS no root addresses available</span></dt><dd><p>
+A warning message issued during resolver startup, this indicates that
+no root addresses have been set. This may be because the resolver will
+get them from a priming query.
+</p></dd><dt><a name="RESOLVER_PARSE_ERROR"></a><span class="term">RESOLVER_PARSE_ERROR error parsing received message: %1 - returning %2</span></dt><dd><p>
+This is a debug message noting that the resolver received a message and
+the parsing of the body of the message failed due to some non-protocol
+related reason (although the parsing of the header succeeded).
+The message parameters give a textual description of the problem and
+the RCODE returned.
+</p></dd><dt><a name="RESOLVER_PRINT_COMMAND"></a><span class="term">RESOLVER_PRINT_COMMAND print message command, arguments are: %1</span></dt><dd><p>
+This debug message is logged when a "print_message" command is received
+by the resolver over the command channel.
+</p></dd><dt><a name="RESOLVER_PROTOCOL_ERROR"></a><span class="term">RESOLVER_PROTOCOL_ERROR protocol error parsing received message: %1 - returning %2</span></dt><dd><p>
+This is a debug message noting that the resolver received a message and
+the parsing of the body of the message failed due to some protocol error
+(although the parsing of the header succeeded). The message parameters
+give a textual description of the problem and the RCODE returned.
+</p></dd><dt><a name="RESOLVER_QUERY_ACCEPTED"></a><span class="term">RESOLVER_QUERY_ACCEPTED query accepted: '%1/%2/%3' from %4</span></dt><dd><p>
+This debug message is produced by the resolver when an incoming query
+is accepted in terms of the query ACL. The log message shows the query
+in the form of <query name>/<query type>/<query class>, and the client
+that sends the query in the form of <Source IP address>#<source port>.
+</p></dd><dt><a name="RESOLVER_QUERY_DROPPED"></a><span class="term">RESOLVER_QUERY_DROPPED query dropped: '%1/%2/%3' from %4</span></dt><dd><p>
+This is an informational message that indicates an incoming query has
+been dropped by the resolver because of the query ACL. Unlike the
+RESOLVER_QUERY_REJECTED case, the server does not return any response.
+The log message shows the query in the form of <query name>/<query
+type>/<query class>, and the client that sends the query in the form of
+<Source IP address>#<source port>.
+</p></dd><dt><a name="RESOLVER_QUERY_REJECTED"></a><span class="term">RESOLVER_QUERY_REJECTED query rejected: '%1/%2/%3' from %4</span></dt><dd><p>
+This is an informational message that indicates an incoming query has
+been rejected by the resolver because of the query ACL. This results
+in a response with an RCODE of REFUSED. The log message shows the query
+in the form of <query name>/<query type>/<query class>, and the client
+that sends the query in the form of <Source IP address>#<source port>.
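+</p><p>
+The three ACL outcomes (accept, reject and drop) differ only in what, if
+anything, is sent back to the client. The following sketch (illustrative
+only, not the resolver's actual code) summarises the behaviour described
+in these three messages:
+</p><pre>
+def acl_outcome(action):
+    """Map a query ACL action to the behaviour described above."""
+    if action == "ACCEPT":   # RESOLVER_QUERY_ACCEPTED: query is processed
+        return "process the query"
+    if action == "REJECT":   # RESOLVER_QUERY_REJECTED: REFUSED returned
+        return "send a response with RCODE REFUSED"
+    if action == "DROP":     # RESOLVER_QUERY_DROPPED: silently discarded
+        return "send no response at all"
+    raise ValueError("unknown ACL action: " + action)
+</pre><p>
+The action names used here are for illustration only and may not match the
+actual ACL configuration syntax exactly.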
+</p></dd><dt><a name="RESOLVER_QUERY_SETUP"></a><span class="term">RESOLVER_QUERY_SETUP query setup</span></dt><dd><p>
+This is a debug message noting that the resolver is creating a
+RecursiveQuery object.
+</p></dd><dt><a name="RESOLVER_QUERY_SHUTDOWN"></a><span class="term">RESOLVER_QUERY_SHUTDOWN query shutdown</span></dt><dd><p>
+This is a debug message noting that the resolver is destroying a
+RecursiveQuery object.
+</p></dd><dt><a name="RESOLVER_QUERY_TIME_SMALL"></a><span class="term">RESOLVER_QUERY_TIME_SMALL query timeout of %1 is too small</span></dt><dd><p>
+During the update of the resolver's configuration parameters, the value
+of the query timeout was found to be too small. The configuration
+parameters were not changed.
+</p></dd><dt><a name="RESOLVER_RECEIVED_MESSAGE"></a><span class="term">RESOLVER_RECEIVED_MESSAGE resolver has received a DNS message</span></dt><dd><p>
+This is a debug message indicating that the resolver has received a
+DNS message. Depending on the debug settings, subsequent log output
+will indicate the nature of the message.
</p></dd><dt><a name="RESOLVER_RECURSIVE"></a><span class="term">RESOLVER_RECURSIVE running in recursive mode</span></dt><dd><p>
-This is an informational message that appears at startup noting that the
-resolver is running in recursive mode.
-</p></dd><dt><a name="RESOLVER_RECVMSG"></a><span class="term">RESOLVER_RECVMSG resolver has received a DNS message</span></dt><dd><p>
-A debug message indicating that the resolver has received a message. Depending
-on the debug settings, subsequent log output will indicate the nature of the
-message.
-</p></dd><dt><a name="RESOLVER_RETRYNEG"></a><span class="term">RESOLVER_RETRYNEG negative number of retries (%1) specified in the configuration</span></dt><dd><p>
-An error message indicating that the resolver configuration has specified a
-negative retry count. Only zero or positive values are valid.
-</p></dd><dt><a name="RESOLVER_ROOTADDR"></a><span class="term">RESOLVER_ROOTADDR setting root address %1(%2)</span></dt><dd><p>
-This message may appear multiple times during startup; it lists the root
-addresses used by the resolver.
-</p></dd><dt><a name="RESOLVER_SERVICE"></a><span class="term">RESOLVER_SERVICE service object created</span></dt><dd><p>
-A debug message, output when the main service object (which handles the
-received queries) is created.
-</p></dd><dt><a name="RESOLVER_SETPARAM"></a><span class="term">RESOLVER_SETPARAM query timeout: %1, client timeout: %2, lookup timeout: %3, retry count: %4</span></dt><dd><p>
-A debug message, lists the parameters associated with the message. These are:
+This is an informational message that appears at startup noting that
+the resolver is running in recursive mode.
+</p></dd><dt><a name="RESOLVER_SERVICE_CREATED"></a><span class="term">RESOLVER_SERVICE_CREATED service object created</span></dt><dd><p>
+This debug message is output when the resolver creates the main service object
+(which handles the received queries).
+</p></dd><dt><a name="RESOLVER_SET_PARAMS"></a><span class="term">RESOLVER_SET_PARAMS query timeout: %1, client timeout: %2, lookup timeout: %3, retry count: %4</span></dt><dd><p>
+This debug message lists the parameters being set for the resolver. These are:
query timeout: the timeout (in ms) used for queries originated by the resolver
-to upstream servers. Client timeout: the interval to resolver a query by
+to upstream servers. Client timeout: the interval to resolve a query by
a client: after this time, the resolver sends back a SERVFAIL to the client
-whilst continuing to resolver the query. Lookup timeout: the time at which the
+whilst continuing to resolve the query. Lookup timeout: the time at which the
resolver gives up trying to resolve a query. Retry count: the number of times
the resolver will retry a query to an upstream server if it gets a timeout.
</p><p>
The client and lookup timeouts require a bit more explanation. The
-resolution of the clent query might require a large number of queries to
+resolution of the client query might require a large number of queries to
upstream nameservers. Even if none of these queries timeout, the total time
taken to perform all the queries may exceed the client timeout. When this
happens, a SERVFAIL is returned to the client, but the resolver continues
-with the resolution process. Data received is added to the cache. However,
-there comes a time - the lookup timeout - when even the resolve gives up.
+with the resolution process; data received is added to the cache. However,
+there comes a time - the lookup timeout - when even the resolver gives up.
At this point it will wait for pending upstream queries to complete or
timeout and drop the query.
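+</p><p>
+As an illustration only (the timeout values and helper below are not taken
+from the resolver's configuration), the relationship between the client and
+lookup timeouts can be pictured as follows:
+</p><pre>
+CLIENT_TIMEOUT_MS = 4000    # assumed: SERVFAIL returned to the client after this
+LOOKUP_TIMEOUT_MS = 30000   # assumed: the resolver gives up entirely after this
+
+def resolver_state(elapsed_ms):
+    """Describe what happens to a client query after elapsed_ms milliseconds."""
+    if elapsed_ms < CLIENT_TIMEOUT_MS:
+        return "still resolving; the client is waiting for the answer"
+    if elapsed_ms < LOOKUP_TIMEOUT_MS:
+        return "client already received SERVFAIL; resolution continues for the cache"
+    return "lookup timeout reached; the query is abandoned"
+</pre><p>
+In other words, the client timeout bounds how long the client waits, while
+the lookup timeout bounds how long the resolver itself keeps working on
+the query.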
+</p></dd><dt><a name="RESOLVER_SET_QUERY_ACL"></a><span class="term">RESOLVER_SET_QUERY_ACL query ACL is configured</span></dt><dd><p>
+This debug message is generated when a new query ACL is configured for
+the resolver.
+</p></dd><dt><a name="RESOLVER_SET_ROOT_ADDRESS"></a><span class="term">RESOLVER_SET_ROOT_ADDRESS setting root address %1(%2)</span></dt><dd><p>
+This message gives the address of one of the root servers used by the
+resolver. It is output during startup and may appear multiple times,
+once for each root server address.
</p></dd><dt><a name="RESOLVER_SHUTDOWN"></a><span class="term">RESOLVER_SHUTDOWN resolver shutdown complete</span></dt><dd><p>
-This information message is output when the resolver has shut down.
+This informational message is output when the resolver has shut down.
</p></dd><dt><a name="RESOLVER_STARTED"></a><span class="term">RESOLVER_STARTED resolver started</span></dt><dd><p>
This informational message is output by the resolver when all initialization
has been completed and it is entering its main loop.
</p></dd><dt><a name="RESOLVER_STARTING"></a><span class="term">RESOLVER_STARTING starting resolver with command line '%1'</span></dt><dd><p>
An informational message, this is output when the resolver starts up.
-</p></dd><dt><a name="RESOLVER_UNEXRESP"></a><span class="term">RESOLVER_UNEXRESP received unexpected response, ignoring</span></dt><dd><p>
-A debug message noting that the server has received a response instead of a
-query and is ignoring it.
+</p></dd><dt><a name="RESOLVER_UNEXPECTED_RESPONSE"></a><span class="term">RESOLVER_UNEXPECTED_RESPONSE received unexpected response, ignoring</span></dt><dd><p>
+This is a debug message noting that the resolver received a DNS response
+packet on the port on which it is listening for queries. The packet
+has been ignored.
+</p></dd><dt><a name="RESOLVER_UNSUPPORTED_OPCODE"></a><span class="term">RESOLVER_UNSUPPORTED_OPCODE opcode %1 not supported by the resolver</span></dt><dd><p>
+This is a debug message output when the resolver received a message with an
+unsupported opcode (it can only process QUERY opcodes). It will return
+a message to the sender with the RCODE set to NOTIMP.
+</p></dd><dt><a name="SRVCOMM_ADDRESSES_NOT_LIST"></a><span class="term">SRVCOMM_ADDRESSES_NOT_LIST the address and port specification is not a list in %1</span></dt><dd><p>
+This points to an error in configuration. What was supposed to be a list of
+IP address and port pairs is not a list at all, but something else.
+</p></dd><dt><a name="SRVCOMM_ADDRESS_FAIL"></a><span class="term">SRVCOMM_ADDRESS_FAIL failed to listen on addresses (%1)</span></dt><dd><p>
+The server failed to bind to one of the address/port pairs specified in the
+configuration, for the reason given in the message (usually because the pair
+is already in use by another service, or because of missing privileges).
+The server will try to recover by binding the address/port pairs it was
+listening on before (if any).
+</p></dd><dt><a name="SRVCOMM_ADDRESS_MISSING"></a><span class="term">SRVCOMM_ADDRESS_MISSING address specification is missing "address" or "port" element in %1</span></dt><dd><p>
+This points to an error in configuration. An address specification in the
+configuration is missing either an address or port and so cannot be used. The
+specification causing the error is given in the message.
+</p></dd><dt><a name="SRVCOMM_ADDRESS_TYPE"></a><span class="term">SRVCOMM_ADDRESS_TYPE address specification type is invalid in %1</span></dt><dd><p>
+This points to an error in configuration. An address specification in the
+configuration is malformed. The specification causing the error is given in the
+message. A valid specification contains an address part (which must be a string
+and must represent a valid IPv4 or IPv6 address) and port (which must be an
+integer in the range valid for TCP/UDP ports on your system).
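+</p><p>
+The checks implied by this and the related messages can be sketched as
+follows (illustrative only, not the server's actual validation code):
+</p><pre>
+import ipaddress
+
+def check_address_spec(spec):
+    """Return None if spec looks valid, otherwise a description of the problem."""
+    if not isinstance(spec, dict):
+        return "specification is not a map"            # cf. SRVCOMM_ADDRESS_TYPE
+    if "address" not in spec or "port" not in spec:
+        return "missing 'address' or 'port' element"   # cf. SRVCOMM_ADDRESS_MISSING
+    try:
+        ipaddress.ip_address(str(spec["address"]))     # must be IPv4 or IPv6
+    except ValueError:
+        return "not a valid IPv4 or IPv6 address"
+    if not isinstance(spec["port"], int) or not 0 <= spec["port"] <= 65535:
+        return "port out of valid range"               # cf. SRVCOMM_PORT_RANGE
+    return None
+</pre><p>
+A valid specification is therefore a map with a string address and an
+integer port, for example {"address": "192.0.2.1", "port": 53}.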
+</p></dd><dt><a name="SRVCOMM_ADDRESS_UNRECOVERABLE"></a><span class="term">SRVCOMM_ADDRESS_UNRECOVERABLE failed to recover original addresses also (%2)</span></dt><dd><p>
+The recovery of old addresses after SRVCOMM_ADDRESS_FAIL also failed for
+the reason listed.
+</p><p>
+The condition indicates problems with the server and/or the system on
+which it is running. The server will continue running to allow
+reconfiguration, but will not be listening on any address or port until
+an administrator reconfigures it.
+</p></dd><dt><a name="SRVCOMM_ADDRESS_VALUE"></a><span class="term">SRVCOMM_ADDRESS_VALUE address to set: %1#%2</span></dt><dd><p>
+Debug message. This lists one address and port value of the set of
+addresses the server is going to listen on (e.g. there will be one log
+message per pair). This appears only after SRVCOMM_SET_LISTEN, but might
+be hidden, as it has a higher debug level.
+</p></dd><dt><a name="SRVCOMM_KEYS_DEINIT"></a><span class="term">SRVCOMM_KEYS_DEINIT deinitializing TSIG keyring</span></dt><dd><p>
+Debug message indicating that the server is deinitializing the TSIG keyring.
+</p></dd><dt><a name="SRVCOMM_KEYS_INIT"></a><span class="term">SRVCOMM_KEYS_INIT initializing TSIG keyring</span></dt><dd><p>
+Debug message indicating that the server is initializing the global TSIG
+keyring. This should be seen only at server start.
+</p></dd><dt><a name="SRVCOMM_KEYS_UPDATE"></a><span class="term">SRVCOMM_KEYS_UPDATE updating TSIG keyring</span></dt><dd><p>
+Debug message indicating that a new keyring is being loaded from the
+configuration (either at startup or as a result of a configuration update).
+</p></dd><dt><a name="SRVCOMM_PORT_RANGE"></a><span class="term">SRVCOMM_PORT_RANGE port out of valid range (%1 in %2)</span></dt><dd><p>
+This points to an error in configuration. The port in an address
+specification is outside the valid range of 0 to 65535.
+</p></dd><dt><a name="SRVCOMM_SET_LISTEN"></a><span class="term">SRVCOMM_SET_LISTEN setting addresses to listen to</span></dt><dd><p>
+Debug message, noting that the server is about to start listening on a
+different set of IP addresses and ports than before.
+</p></dd><dt><a name="STATHTTPD_BAD_OPTION_VALUE"></a><span class="term">STATHTTPD_BAD_OPTION_VALUE bad command line argument: %1</span></dt><dd><p>
+The stats-httpd module was called with a bad command-line argument
+and will not start.
+</p></dd><dt><a name="STATHTTPD_CC_SESSION_ERROR"></a><span class="term">STATHTTPD_CC_SESSION_ERROR error connecting to message bus: %1</span></dt><dd><p>
+The stats-httpd module was unable to connect to the BIND 10 command
+and control bus. A likely problem is that the message bus daemon
+(b10-msgq) is not running. The stats-httpd module will now shut down.
+</p></dd><dt><a name="STATHTTPD_CLOSING"></a><span class="term">STATHTTPD_CLOSING closing %1#%2</span></dt><dd><p>
+The stats-httpd daemon will stop listening for requests on the given
+address and port number.
+</p></dd><dt><a name="STATHTTPD_CLOSING_CC_SESSION"></a><span class="term">STATHTTPD_CLOSING_CC_SESSION stopping cc session</span></dt><dd><p>
+Debug message indicating that the stats-httpd module is disconnecting
+from the command and control bus.
+</p></dd><dt><a name="STATHTTPD_HANDLE_CONFIG"></a><span class="term">STATHTTPD_HANDLE_CONFIG reading configuration: %1</span></dt><dd><p>
+The stats-httpd daemon has received new configuration data and will now
+process it. The (changed) data is printed.
+</p></dd><dt><a name="STATHTTPD_RECEIVED_SHUTDOWN_COMMAND"></a><span class="term">STATHTTPD_RECEIVED_SHUTDOWN_COMMAND shutdown command received</span></dt><dd><p>
+A shutdown command was sent to the stats-httpd module, and it will
+now shut down.
+</p></dd><dt><a name="STATHTTPD_RECEIVED_STATUS_COMMAND"></a><span class="term">STATHTTPD_RECEIVED_STATUS_COMMAND received command to return status</span></dt><dd><p>
+A status command was sent to the stats-httpd module, and it will
+respond with 'Stats Httpd is up.' and its PID.
+</p></dd><dt><a name="STATHTTPD_RECEIVED_UNKNOWN_COMMAND"></a><span class="term">STATHTTPD_RECEIVED_UNKNOWN_COMMAND received unknown command: %1</span></dt><dd><p>
+An unknown command has been sent to the stats-httpd module. The
+stats-httpd module will respond with an error, and the command will
+be ignored.
+</p></dd><dt><a name="STATHTTPD_SERVER_ERROR"></a><span class="term">STATHTTPD_SERVER_ERROR HTTP server error: %1</span></dt><dd><p>
+An internal error occurred while handling an HTTP request. An HTTP 500
+response will be sent back, and the specific error is printed. This
+is an error condition that likely points to a module that is not
+responding correctly to statistics requests.
+</p></dd><dt><a name="STATHTTPD_SERVER_INIT_ERROR"></a><span class="term">STATHTTPD_SERVER_INIT_ERROR HTTP server initialization error: %1</span></dt><dd><p>
+There was a problem initializing the HTTP server in the stats-httpd
+module upon receiving its configuration data. The most likely cause
+is a port binding problem or a bad configuration value. The specific
+error is printed in the message. The new configuration is ignored,
+and an error is sent back.
+</p></dd><dt><a name="STATHTTPD_SHUTDOWN"></a><span class="term">STATHTTPD_SHUTDOWN shutting down</span></dt><dd><p>
+The stats-httpd daemon is shutting down.
+</p></dd><dt><a name="STATHTTPD_STARTED"></a><span class="term">STATHTTPD_STARTED listening on %1#%2</span></dt><dd><p>
+The stats-httpd daemon will now start listening for requests on the
+given address and port number.
+</p></dd><dt><a name="STATHTTPD_STARTING_CC_SESSION"></a><span class="term">STATHTTPD_STARTING_CC_SESSION starting cc session</span></dt><dd><p>
+Debug message indicating that the stats-httpd module is connecting to
+the command and control bus.
+</p></dd><dt><a name="STATHTTPD_START_SERVER_INIT_ERROR"></a><span class="term">STATHTTPD_START_SERVER_INIT_ERROR HTTP server initialization error: %1</span></dt><dd><p>
+There was a problem initializing the HTTP server in the stats-httpd
+module upon startup. The most likely cause is that it was not able
+to bind to the listening port. The specific error is printed, and the
+module will shut down.
+</p></dd><dt><a name="STATHTTPD_STOPPED_BY_KEYBOARD"></a><span class="term">STATHTTPD_STOPPED_BY_KEYBOARD keyboard interrupt, shutting down</span></dt><dd><p>
+There was a keyboard interrupt signal to stop the stats-httpd
+daemon. The daemon will now shut down.
+</p></dd><dt><a name="STATHTTPD_UNKNOWN_CONFIG_ITEM"></a><span class="term">STATHTTPD_UNKNOWN_CONFIG_ITEM unknown configuration item: %1</span></dt><dd><p>
+The stats-httpd daemon received a configuration update from the
+configuration manager. However, one of the items in the
+configuration is unknown. The new configuration is ignored, and an
+error is sent back. A possible cause is that there was an upgrade
+problem, and the stats-httpd version is out of sync with the rest of
+the system.
+</p></dd><dt><a name="STATS_BAD_OPTION_VALUE"></a><span class="term">STATS_BAD_OPTION_VALUE bad command line argument: %1</span></dt><dd><p>
+The stats module was called with a bad command-line argument and will
+not start.
+</p></dd><dt><a name="STATS_CC_SESSION_ERROR"></a><span class="term">STATS_CC_SESSION_ERROR error connecting to message bus: %1</span></dt><dd><p>
+The stats module was unable to connect to the BIND 10 command and
+control bus. A likely problem is that the message bus daemon
+(b10-msgq) is not running. The stats module will now shut down.
+</p></dd><dt><a name="STATS_RECEIVED_NEW_CONFIG"></a><span class="term">STATS_RECEIVED_NEW_CONFIG received new configuration: %1</span></dt><dd><p>
+This debug message is printed when the stats module has received a
+configuration update from the configuration manager.
+</p></dd><dt><a name="STATS_RECEIVED_REMOVE_COMMAND"></a><span class="term">STATS_RECEIVED_REMOVE_COMMAND received command to remove %1</span></dt><dd><p>
+A remove command for the given name was sent to the stats module, and
+the given statistics value will now be removed. It will not appear in
+statistics reports until it appears in a statistics update from a
+module again.
+</p></dd><dt><a name="STATS_RECEIVED_RESET_COMMAND"></a><span class="term">STATS_RECEIVED_RESET_COMMAND received command to reset all statistics</span></dt><dd><p>
+The stats module received a command to clear all collected statistics.
+The data is cleared until it receives an update from the modules again.
+</p></dd><dt><a name="STATS_RECEIVED_SHOW_ALL_COMMAND"></a><span class="term">STATS_RECEIVED_SHOW_ALL_COMMAND received command to show all statistics</span></dt><dd><p>
+The stats module received a command to show all statistics that it has
+collected.
+</p></dd><dt><a name="STATS_RECEIVED_SHOW_NAME_COMMAND"></a><span class="term">STATS_RECEIVED_SHOW_NAME_COMMAND received command to show statistics for %1</span></dt><dd><p>
+The stats module received a command to show the statistics that it has
+collected for the given item.
+</p></dd><dt><a name="STATS_RECEIVED_SHUTDOWN_COMMAND"></a><span class="term">STATS_RECEIVED_SHUTDOWN_COMMAND shutdown command received</span></dt><dd><p>
+A shutdown command was sent to the stats module and it will now shut down.
+</p></dd><dt><a name="STATS_RECEIVED_STATUS_COMMAND"></a><span class="term">STATS_RECEIVED_STATUS_COMMAND received command to return status</span></dt><dd><p>
+A status command was sent to the stats module. It will return a
+response indicating that it is running normally.
+</p></dd><dt><a name="STATS_RECEIVED_UNKNOWN_COMMAND"></a><span class="term">STATS_RECEIVED_UNKNOWN_COMMAND received unknown command: %1</span></dt><dd><p>
+An unknown command has been sent to the stats module. The stats module
+will respond with an error and the command will be ignored.
+</p></dd><dt><a name="STATS_SEND_REQUEST_BOSS"></a><span class="term">STATS_SEND_REQUEST_BOSS requesting boss to send statistics</span></dt><dd><p>
+This debug message is printed when a request is sent to the boss module
+to send its data to the stats module.
+</p></dd><dt><a name="STATS_STOPPED_BY_KEYBOARD"></a><span class="term">STATS_STOPPED_BY_KEYBOARD keyboard interrupt, shutting down</span></dt><dd><p>
+There was a keyboard interrupt signal to stop the stats module. The
+daemon will now shut down.
+</p></dd><dt><a name="STATS_UNKNOWN_COMMAND_IN_SPEC"></a><span class="term">STATS_UNKNOWN_COMMAND_IN_SPEC unknown command in specification file: %1</span></dt><dd><p>
+The specification file for the stats module contains a command that
+is unknown in the implementation. The most likely cause is an
+installation problem, where the specification file stats.spec is
+from a different version of BIND 10 than the stats module itself.
+Please check your installation.
+</p></dd><dt><a name="XFRIN_AXFR_DATABASE_FAILURE"></a><span class="term">XFRIN_AXFR_DATABASE_FAILURE AXFR transfer of zone %1 failed: %2</span></dt><dd><p>
+The AXFR transfer for the given zone has failed due to a database problem.
+The error is shown in the log message.
+</p></dd><dt><a name="XFRIN_AXFR_INTERNAL_FAILURE"></a><span class="term">XFRIN_AXFR_INTERNAL_FAILURE AXFR transfer of zone %1 failed: %2</span></dt><dd><p>
+The AXFR transfer for the given zone has failed due to an internal
+problem in the bind10 python wrapper library.
+The error is shown in the log message.
+</p></dd><dt><a name="XFRIN_AXFR_TRANSFER_FAILURE"></a><span class="term">XFRIN_AXFR_TRANSFER_FAILURE AXFR transfer of zone %1 failed: %2</span></dt><dd><p>
+The AXFR transfer for the given zone has failed due to a protocol error.
+The error is shown in the log message.
+</p></dd><dt><a name="XFRIN_AXFR_TRANSFER_STARTED"></a><span class="term">XFRIN_AXFR_TRANSFER_STARTED AXFR transfer of zone %1 started</span></dt><dd><p>
+A connection to the master server has been made, the serial value in
+the SOA record has been checked, and a zone transfer has been started.
+</p></dd><dt><a name="XFRIN_AXFR_TRANSFER_SUCCESS"></a><span class="term">XFRIN_AXFR_TRANSFER_SUCCESS AXFR transfer of zone %1 succeeded</span></dt><dd><p>
+The AXFR transfer of the given zone was successfully completed.
+</p></dd><dt><a name="XFRIN_BAD_MASTER_ADDR_FORMAT"></a><span class="term">XFRIN_BAD_MASTER_ADDR_FORMAT bad format for master address: %1</span></dt><dd><p>
+The given master address is not a valid IP address.
+</p></dd><dt><a name="XFRIN_BAD_MASTER_PORT_FORMAT"></a><span class="term">XFRIN_BAD_MASTER_PORT_FORMAT bad format for master port: %1</span></dt><dd><p>
+The master port as read from the configuration is not a valid port number.
+</p></dd><dt><a name="XFRIN_BAD_TSIG_KEY_STRING"></a><span class="term">XFRIN_BAD_TSIG_KEY_STRING bad TSIG key string: %1</span></dt><dd><p>
+The TSIG key string as read from the configuration does not represent
+a valid TSIG key.
+</p></dd><dt><a name="XFRIN_BAD_ZONE_CLASS"></a><span class="term">XFRIN_BAD_ZONE_CLASS Invalid zone class: %1</span></dt><dd><p>
+The zone class as read from the configuration is not a valid DNS class.
+</p></dd><dt><a name="XFRIN_CC_SESSION_ERROR"></a><span class="term">XFRIN_CC_SESSION_ERROR error reading from cc channel: %1</span></dt><dd><p>
+There was a problem reading from the command and control channel. The
+most likely cause is that the msgq daemon is not running.
+</p></dd><dt><a name="XFRIN_COMMAND_ERROR"></a><span class="term">XFRIN_COMMAND_ERROR error while executing command '%1': %2</span></dt><dd><p>
+There was an error while the given command was being processed. The
+error is given in the log message.
+</p></dd><dt><a name="XFRIN_CONNECT_MASTER"></a><span class="term">XFRIN_CONNECT_MASTER error connecting to master at %1: %2</span></dt><dd><p>
+There was an error opening a connection to the master. The error is
+shown in the log message.
+</p></dd><dt><a name="XFRIN_IMPORT_DNS"></a><span class="term">XFRIN_IMPORT_DNS error importing python DNS module: %1</span></dt><dd><p>
+There was an error importing the python DNS module pydnspp. The most
+likely cause is a PYTHONPATH problem.
+</p></dd><dt><a name="XFRIN_MSGQ_SEND_ERROR"></a><span class="term">XFRIN_MSGQ_SEND_ERROR error while contacting %1 and %2</span></dt><dd><p>
+There was a problem sending a message to the xfrout module or the
+zone manager. This most likely means that the msgq daemon has quit or
+was killed.
+</p></dd><dt><a name="XFRIN_MSGQ_SEND_ERROR_ZONE_MANAGER"></a><span class="term">XFRIN_MSGQ_SEND_ERROR_ZONE_MANAGER error while contacting %1</span></dt><dd><p>
+There was a problem sending a message to the zone manager. This most
+likely means that the msgq daemon has quit or was killed.
+</p></dd><dt><a name="XFRIN_RETRANSFER_UNKNOWN_ZONE"></a><span class="term">XFRIN_RETRANSFER_UNKNOWN_ZONE got notification to retransfer unknown zone %1</span></dt><dd><p>
+There was an internal command to retransfer the given zone, but the
+zone is not known to the system. This may indicate that the configuration
+for xfrin is incomplete, or there was a typographical error in the
+zone name in the configuration.
+</p></dd><dt><a name="XFRIN_STARTING"></a><span class="term">XFRIN_STARTING starting resolver with command line '%1'</span></dt><dd><p>
+An informational message, this is output when the resolver starts up.
+</p></dd><dt><a name="XFRIN_STOPPED_BY_KEYBOARD"></a><span class="term">XFRIN_STOPPED_BY_KEYBOARD keyboard interrupt, shutting down</span></dt><dd><p>
+There was a keyboard interrupt signal to stop the xfrin daemon. The
+daemon will now shut down.
+</p></dd><dt><a name="XFRIN_UNKNOWN_ERROR"></a><span class="term">XFRIN_UNKNOWN_ERROR unknown error: %1</span></dt><dd><p>
+An uncaught exception was raised while running the xfrin daemon. The
+exception message is printed in the log message.
+</p></dd><dt><a name="XFROUT_AXFR_TRANSFER_DONE"></a><span class="term">XFROUT_AXFR_TRANSFER_DONE transfer of %1/%2 complete</span></dt><dd><p>
+The transfer of the given zone has been completed successfully, or was
+aborted due to a shutdown event.
+</p></dd><dt><a name="XFROUT_AXFR_TRANSFER_ERROR"></a><span class="term">XFROUT_AXFR_TRANSFER_ERROR error transferring zone %1/%2: %3</span></dt><dd><p>
+An uncaught exception was encountered while sending the response to
+an AXFR query. The error message of the exception is included in the
+log message, but this error most likely points to incomplete exception
+handling in the code.
+</p></dd><dt><a name="XFROUT_AXFR_TRANSFER_FAILED"></a><span class="term">XFROUT_AXFR_TRANSFER_FAILED transfer of %1/%2 failed, rcode: %3</span></dt><dd><p>
+A transfer out for the given zone failed. An error response is sent
+to the client. The given rcode is the rcode that is set in the error
+response. This is either NOTAUTH (we are not authoritative for the
+zone), SERVFAIL (our internal database is missing the SOA record for
+the zone), or REFUSED (the limit of simultaneous outgoing AXFR
+transfers, as specified by the configuration value
+Xfrout/max_transfers_out, has been reached).
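+For example, if transfers are being refused because the limit has been
+reached, an administrator can raise the limit by increasing the
+Xfrout/max_transfers_out configuration value (say, to 20) through the normal
+configuration interface; the exact command depends on the configuration tool
+in use.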
+</p></dd><dt><a name="XFROUT_AXFR_TRANSFER_STARTED"></a><span class="term">XFROUT_AXFR_TRANSFER_STARTED transfer of zone %1/%2 has started</span></dt><dd><p>
+A transfer out of the given zone has started.
+</p></dd><dt><a name="XFROUT_BAD_TSIG_KEY_STRING"></a><span class="term">XFROUT_BAD_TSIG_KEY_STRING bad TSIG key string: %1</span></dt><dd><p>
+The TSIG key string as read from the configuration does not represent
+a valid TSIG key.
+</p></dd><dt><a name="XFROUT_CC_SESSION_ERROR"></a><span class="term">XFROUT_CC_SESSION_ERROR error reading from cc channel: %1</span></dt><dd><p>
+There was a problem reading from the command and control channel. The
+most likely cause is that the msgq daemon is not running.
+</p></dd><dt><a name="XFROUT_CC_SESSION_TIMEOUT_ERROR"></a><span class="term">XFROUT_CC_SESSION_TIMEOUT_ERROR timeout waiting for cc response</span></dt><dd><p>
+There was a problem reading a response from another module over the
+command and control channel. The most likely cause is that the
+configuration manager b10-cfgmgr is not running.
+</p></dd><dt><a name="XFROUT_FETCH_REQUEST_ERROR"></a><span class="term">XFROUT_FETCH_REQUEST_ERROR socket error while fetching a request from the auth daemon</span></dt><dd><p>
+There was a socket error while contacting the b10-auth daemon to
+fetch a transfer request. The auth daemon may have shut down.
+</p></dd><dt><a name="XFROUT_HANDLE_QUERY_ERROR"></a><span class="term">XFROUT_HANDLE_QUERY_ERROR error while handling query: %1</span></dt><dd><p>
+There was a general error handling an xfrout query. The error is shown
+in the message. In principle this error should not appear, and points
+to an oversight in catching exceptions in the right place. However, to
+ensure the daemon keeps running, this error is caught and reported.
+</p></dd><dt><a name="XFROUT_IMPORT"></a><span class="term">XFROUT_IMPORT error importing python module: %1</span></dt><dd><p>
+There was an error importing a python module. One of the modules needed
+by xfrout could not be found. This suggests that either some libraries
+are missing on the system, or the PYTHONPATH variable is not correct.
+The specific place where this library needs to be depends on your
+system and your specific installation.
+</p></dd><dt><a name="XFROUT_NEW_CONFIG"></a><span class="term">XFROUT_NEW_CONFIG Update xfrout configuration</span></dt><dd><p>
+New configuration settings have been sent from the configuration
+manager. The xfrout daemon will now apply them.
+</p></dd><dt><a name="XFROUT_NEW_CONFIG_DONE"></a><span class="term">XFROUT_NEW_CONFIG_DONE Update xfrout configuration done</span></dt><dd><p>
+The xfrout daemon is now done reading the new configuration settings
+received from the configuration manager.
+</p></dd><dt><a name="XFROUT_NOTIFY_COMMAND"></a><span class="term">XFROUT_NOTIFY_COMMAND received command to send notifies for %1/%2</span></dt><dd><p>
+The xfrout daemon received a command on the command channel that
+NOTIFY packets should be sent for the given zone.
+</p></dd><dt><a name="XFROUT_PARSE_QUERY_ERROR"></a><span class="term">XFROUT_PARSE_QUERY_ERROR error parsing query: %1</span></dt><dd><p>
+There was a parse error while reading an incoming query. The parse
+error is shown in the log message. A remote client sent a packet we
+do not understand or support. The xfrout request will be ignored.
+In general, this should only occur for unexpected problems like
+memory allocation failures, as the query should already have been
+parsed by the b10-auth daemon, before it was passed here.
+</p></dd><dt><a name="XFROUT_PROCESS_REQUEST_ERROR"></a><span class="term">XFROUT_PROCESS_REQUEST_ERROR error processing transfer request: %2</span></dt><dd><p>
+There was an error processing a transfer request. The error is included
+in the log message, but at this point no specific information other
+than that could be given. This points to incomplete exception handling
+in the code.
+</p></dd><dt><a name="XFROUT_QUERY_DROPPED"></a><span class="term">XFROUT_QUERY_DROPPED request to transfer %1/%2 to [%3]:%4 dropped</span></dt><dd><p>
+The xfrout process silently dropped a request to transfer the zone to the
+given host. This is required by the ACLs. The %1 and %2 represent the zone
+name and class, and the %3 and %4 the IP address and port of the peer
+requesting the transfer.
+</p></dd><dt><a name="XFROUT_QUERY_REJECTED"></a><span class="term">XFROUT_QUERY_REJECTED request to transfer %1/%2 to [%3]:%4 rejected</span></dt><dd><p>
+The xfrout process rejected (with a REFUSED rcode) a request to transfer the
+zone to the given host. This is because of the ACLs. The %1 and %2 represent
+the zone name and class, and the %3 and %4 the IP address and port of the
+peer requesting the transfer.
+</p></dd><dt><a name="XFROUT_RECEIVED_SHUTDOWN_COMMAND"></a><span class="term">XFROUT_RECEIVED_SHUTDOWN_COMMAND shutdown command received</span></dt><dd><p>
+The xfrout daemon received a shutdown command from the command channel
+and will now shut down.
+</p></dd><dt><a name="XFROUT_RECEIVE_FILE_DESCRIPTOR_ERROR"></a><span class="term">XFROUT_RECEIVE_FILE_DESCRIPTOR_ERROR error receiving the file descriptor for an XFR connection</span></dt><dd><p>
+There was an error receiving the file descriptor for the transfer
+request. Normally, the request is received by b10-auth, and passed on
+to the xfrout daemon, so it can answer directly. However, there was a
+problem receiving this file descriptor. The request will be ignored.
+</p></dd><dt><a name="XFROUT_REMOVE_OLD_UNIX_SOCKET_FILE_ERROR"></a><span class="term">XFROUT_REMOVE_OLD_UNIX_SOCKET_FILE_ERROR error removing unix socket file %1: %2</span></dt><dd><p>
+The unix socket file xfrout needs for contact with the auth daemon
+already exists, and needs to be removed first, but there is a problem
+removing it. It is likely that we do not have permission to remove
+this file. The specific error is shown in the log message. The xfrout
+daemon will shut down.
+</p></dd><dt><a name="XFROUT_REMOVE_UNIX_SOCKET_FILE_ERROR"></a><span class="term">XFROUT_REMOVE_UNIX_SOCKET_FILE_ERROR error clearing unix socket file %1: %2</span></dt><dd><p>
+When shutting down, the xfrout daemon tried to clear the unix socket
+file used for communication with the auth daemon. It failed to remove
+the file. The reason for the failure is given in the error message.
+</p></dd><dt><a name="XFROUT_SOCKET_SELECT_ERROR"></a><span class="term">XFROUT_SOCKET_SELECT_ERROR error while calling select() on request socket: %1</span></dt><dd><p>
+There was an error while calling select() on the socket that informs
+the xfrout daemon that a new xfrout request has arrived. This should
+be a result of a rare local error such as a memory allocation failure and
+shouldn't happen under normal conditions. The error is included in the
+log message.
+</p></dd><dt><a name="XFROUT_STOPPED_BY_KEYBOARD"></a><span class="term">XFROUT_STOPPED_BY_KEYBOARD keyboard interrupt, shutting down</span></dt><dd><p>
+There was a keyboard interrupt signal to stop the xfrout daemon. The
+daemon will now shut down.
+</p></dd><dt><a name="XFROUT_STOPPING"></a><span class="term">XFROUT_STOPPING the xfrout daemon is shutting down</span></dt><dd><p>
+The current transfer is aborted, as the xfrout daemon is shutting down.
+</p></dd><dt><a name="XFROUT_UNIX_SOCKET_FILE_IN_USE"></a><span class="term">XFROUT_UNIX_SOCKET_FILE_IN_USE another xfrout process seems to be using the unix socket file %1</span></dt><dd><p>
+While starting up, the xfrout daemon tried to clear the unix domain
+socket needed for contacting the b10-auth daemon to pass requests
+on, but the file is in use. The most likely cause is that another
+xfrout daemon process is still running. This xfrout daemon (the one
+printing this message) will not start.
+</p></dd><dt><a name="ZONEMGR_CCSESSION_ERROR"></a><span class="term">ZONEMGR_CCSESSION_ERROR command channel session error: %1</span></dt><dd><p>
+An error was encountered on the command channel. The message indicates
+the nature of the error.
+</p></dd><dt><a name="ZONEMGR_JITTER_TOO_BIG"></a><span class="term">ZONEMGR_JITTER_TOO_BIG refresh_jitter is too big, setting to 0.5</span></dt><dd><p>
+The value specified in the configuration for the refresh jitter is too large,
+so its value has been set to the maximum of 0.5.
+</p></dd><dt><a name="ZONEMGR_KEYBOARD_INTERRUPT"></a><span class="term">ZONEMGR_KEYBOARD_INTERRUPT exiting zonemgr process as result of keyboard interrupt</span></dt><dd><p>
+An informational message output when the zone manager was being run at a
+terminal and it was terminated via a keyboard interrupt signal.
+</p></dd><dt><a name="ZONEMGR_LOAD_ZONE"></a><span class="term">ZONEMGR_LOAD_ZONE loading zone %1 (class %2)</span></dt><dd><p>
+This is a debug message indicating that the zone of the specified class
+is being loaded.
+</p></dd><dt><a name="ZONEMGR_NO_MASTER_ADDRESS"></a><span class="term">ZONEMGR_NO_MASTER_ADDRESS internal BIND 10 command did not contain address of master</span></dt><dd><p>
+A command received by the zone manager from the Auth module did not
+contain the address of the master server from which a NOTIFY message
+was received. This may be due to an internal programming error; please
+submit a bug report.
+</p></dd><dt><a name="ZONEMGR_NO_SOA"></a><span class="term">ZONEMGR_NO_SOA zone %1 (class %2) does not have an SOA record</span></dt><dd><p>
+When loading the named zone of the specified class the zone manager
+discovered that the data did not contain an SOA record. The load has
+been abandoned.
+</p></dd><dt><a name="ZONEMGR_NO_TIMER_THREAD"></a><span class="term">ZONEMGR_NO_TIMER_THREAD trying to stop zone timer thread but it is not running</span></dt><dd><p>
+An attempt was made to stop the timer thread (used to track when zones
+should be refreshed) but it was not running. This may indicate an
+internal program error. Please submit a bug report.
+</p></dd><dt><a name="ZONEMGR_NO_ZONE_CLASS"></a><span class="term">ZONEMGR_NO_ZONE_CLASS internal BIND 10 command did not contain class of zone</span></dt><dd><p>
+A command received by the zone manager from another BIND 10 module did
+not contain the class of the zone on which the zone manager should act.
+This may be due to an internal programming error; please submit a
+bug report.
+</p></dd><dt><a name="ZONEMGR_NO_ZONE_NAME"></a><span class="term">ZONEMGR_NO_ZONE_NAME internal BIND 10 command did not contain name of zone</span></dt><dd><p>
+A command received by the zone manager from another BIND 10 module did
+not contain the name of the zone on which the zone manager should act.
+This may be due to an internal programming error; please submit a
+bug report.
+</p></dd><dt><a name="ZONEMGR_RECEIVE_NOTIFY"></a><span class="term">ZONEMGR_RECEIVE_NOTIFY received NOTIFY command for zone %1 (class %2)</span></dt><dd><p>
+This is a debug message indicating that the zone manager has received a
+NOTIFY command over the command channel. The command is sent by the Auth
+process when it is acting as a slave server for the zone and causes the
+zone manager to record the master server for the zone and start a timer;
+when the timer expires, the master will be polled to see if it contains
+new data.
+</p></dd><dt><a name="ZONEMGR_RECEIVE_SHUTDOWN"></a><span class="term">ZONEMGR_RECEIVE_SHUTDOWN received SHUTDOWN command</span></dt><dd><p>
+This is a debug message indicating that the zone manager has received
+a SHUTDOWN command over the command channel from the Boss process.
+It will act on this command and shut down.
+</p></dd><dt><a name="ZONEMGR_RECEIVE_UNKNOWN"></a><span class="term">ZONEMGR_RECEIVE_UNKNOWN received unknown command '%1'</span></dt><dd><p>
+This is a warning message indicating that the zone manager has received
+the stated command over the command channel. The command is not known
+to the zone manager and although the command is ignored, its receipt
+may indicate an internal error. Please submit a bug report.
+</p></dd><dt><a name="ZONEMGR_RECEIVE_XFRIN_FAILED"></a><span class="term">ZONEMGR_RECEIVE_XFRIN_FAILED received XFRIN FAILED command for zone %1 (class %2)</span></dt><dd><p>
+This is a debug message indicating that the zone manager has received
+an XFRIN FAILED command over the command channel. The command is sent
+by the Xfrin process when a transfer of zone data into the system has
+failed, and causes the zone manager to schedule another transfer attempt.
+</p></dd><dt><a name="ZONEMGR_RECEIVE_XFRIN_SUCCESS"></a><span class="term">ZONEMGR_RECEIVE_XFRIN_SUCCESS received XFRIN SUCCESS command for zone %1 (class %2)</span></dt><dd><p>
+This is a debug message indicating that the zone manager has received
+an XFRIN SUCCESS command over the command channel. The command is sent
+by the Xfrin process when the transfer of zone data into the system has
+succeeded, and causes the data to be loaded and served by BIND 10.
+</p></dd><dt><a name="ZONEMGR_REFRESH_ZONE"></a><span class="term">ZONEMGR_REFRESH_ZONE refreshing zone %1 (class %2)</span></dt><dd><p>
+The zone manager is refreshing the named zone of the specified class
+with updated information.
+</p></dd><dt><a name="ZONEMGR_SELECT_ERROR"></a><span class="term">ZONEMGR_SELECT_ERROR error with select(): %1</span></dt><dd><p>
+An attempt to wait for input from a socket failed. The failing operation
+is a call to the operating system's select() function, which failed for
+the given reason.
+</p></dd><dt><a name="ZONEMGR_SEND_FAIL"></a><span class="term">ZONEMGR_SEND_FAIL failed to send command to %1, session has been closed</span></dt><dd><p>
+The zone manager attempted to send a command to the named BIND 10 module,
+but the send failed. The session between the modules has been closed.
+</p></dd><dt><a name="ZONEMGR_SESSION_ERROR"></a><span class="term">ZONEMGR_SESSION_ERROR unable to establish session to command channel daemon</span></dt><dd><p>
+The zonemgr process was not able to be started because it could not
+connect to the command channel daemon. The most usual cause of this
+problem is that the daemon is not running.
+</p></dd><dt><a name="ZONEMGR_SESSION_TIMEOUT"></a><span class="term">ZONEMGR_SESSION_TIMEOUT timeout on session to command channel daemon</span></dt><dd><p>
+The zonemgr process was not able to be started because it timed out when
+connecting to the command channel daemon. The most usual cause of this
+problem is that the daemon is not running.
+</p></dd><dt><a name="ZONEMGR_SHUTDOWN"></a><span class="term">ZONEMGR_SHUTDOWN zone manager has shut down</span></dt><dd><p>
+A debug message, output when the zone manager has shut down completely.
+</p></dd><dt><a name="ZONEMGR_STARTING"></a><span class="term">ZONEMGR_STARTING zone manager starting</span></dt><dd><p>
+A debug message output when the zone manager starts up.
+</p></dd><dt><a name="ZONEMGR_TIMER_THREAD_RUNNING"></a><span class="term">ZONEMGR_TIMER_THREAD_RUNNING trying to start timer thread but one is already running</span></dt><dd><p>
+This message is issued when an attempt is made to start the timer
+thread (which keeps track of when zones need a refresh) but one is
+already running. It indicates either an error in the program logic or
+a problem with stopping a previous instance of the timer. Please submit
+a bug report.
+</p></dd><dt><a name="ZONEMGR_UNKNOWN_ZONE_FAIL"></a><span class="term">ZONEMGR_UNKNOWN_ZONE_FAIL zone %1 (class %2) is not known to the zone manager</span></dt><dd><p>
+An XFRIN operation has failed but the zone that was the subject of the
+operation is not being managed by the zone manager. This may indicate
+an error in the program (as the operation should not have been initiated
+if this were the case). Please submit a bug report.
+</p></dd><dt><a name="ZONEMGR_UNKNOWN_ZONE_NOTIFIED"></a><span class="term">ZONEMGR_UNKNOWN_ZONE_NOTIFIED notified zone %1 (class %2) is not known to the zone manager</span></dt><dd><p>
+A NOTIFY was received but the zone that was the subject of the operation
+is not being managed by the zone manager. This may indicate an error
+in the program (as the operation should not have been initiated if this
+were the case). Please submit a bug report.
+</p></dd><dt><a name="ZONEMGR_UNKNOWN_ZONE_SUCCESS"></a><span class="term">ZONEMGR_UNKNOWN_ZONE_SUCCESS zone %1 (class %2) is not known to the zone manager</span></dt><dd><p>
+An XFRIN operation has succeeded but the zone received is not being
+managed by the zone manager. This may indicate an error in the program
+(as the operation should not have been initiated if this were the case).
+Please submit a bug report.
</p></dd></dl></div><p>
</p></div></div></body></html>
diff --git a/doc/guide/bind10-messages.xml b/doc/guide/bind10-messages.xml
index eaa8bb9..f5c44b3 100644
--- a/doc/guide/bind10-messages.xml
+++ b/doc/guide/bind10-messages.xml
@@ -5,6 +5,12 @@
<!ENTITY % version SYSTEM "version.ent">
%version;
]>
+<!--
+ This XML document is generated using the system_messages.py tool
+ based on the .mes message files.
+
+ Do not edit this file.
+-->
<book>
<?xml-stylesheet href="bind10-guide.css" type="text/css"?>
@@ -62,16 +68,16 @@
<para>
<variablelist>
-<varlistentry id="ASIODNS_FETCHCOMP">
-<term>ASIODNS_FETCHCOMP upstream fetch to %1(%2) has now completed</term>
+<varlistentry id="ASIODNS_FETCH_COMPLETED">
+<term>ASIODNS_FETCH_COMPLETED upstream fetch to %1(%2) has now completed</term>
<listitem><para>
-A debug message, this records the the upstream fetch (a query made by the
+A debug message, this records that the upstream fetch (a query made by the
resolver on behalf of its client) to the specified address has completed.
</para></listitem>
</varlistentry>
-<varlistentry id="ASIODNS_FETCHSTOP">
-<term>ASIODNS_FETCHSTOP upstream fetch to %1(%2) has been stopped</term>
+<varlistentry id="ASIODNS_FETCH_STOPPED">
+<term>ASIODNS_FETCH_STOPPED upstream fetch to %1(%2) has been stopped</term>
<listitem><para>
An external component has requested the halting of an upstream fetch. This
is an allowed operation, and the message should only appear if debug is
@@ -79,27 +85,27 @@ enabled.
</para></listitem>
</varlistentry>
-<varlistentry id="ASIODNS_OPENSOCK">
-<term>ASIODNS_OPENSOCK error %1 opening %2 socket to %3(%4)</term>
+<varlistentry id="ASIODNS_OPEN_SOCKET">
+<term>ASIODNS_OPEN_SOCKET error %1 opening %2 socket to %3(%4)</term>
<listitem><para>
The asynchronous I/O code encountered an error when trying to open a socket
of the specified protocol in order to send a message to the target address.
-The the number of the system error that cause the problem is given in the
+The number of the system error that caused the problem is given in the
message.
</para></listitem>
</varlistentry>
-<varlistentry id="ASIODNS_RECVSOCK">
-<term>ASIODNS_RECVSOCK error %1 reading %2 data from %3(%4)</term>
+<varlistentry id="ASIODNS_READ_DATA">
+<term>ASIODNS_READ_DATA error %1 reading %2 data from %3(%4)</term>
<listitem><para>
-The asynchronous I/O code encountered an error when trying read data from
-the specified address on the given protocol. The the number of the system
-error that cause the problem is given in the message.
+The asynchronous I/O code encountered an error when trying to read data from
+the specified address on the given protocol. The number of the system
+error that caused the problem is given in the message.
</para></listitem>
</varlistentry>
-<varlistentry id="ASIODNS_RECVTMO">
-<term>ASIODNS_RECVTMO receive timeout while waiting for data from %1(%2)</term>
+<varlistentry id="ASIODNS_READ_TIMEOUT">
+<term>ASIODNS_READ_TIMEOUT receive timeout while waiting for data from %1(%2)</term>
<listitem><para>
An upstream fetch from the specified address timed out. This may happen for
any number of reasons and is most probably a problem at the remote server
@@ -108,29 +114,1436 @@ enabled.
</para></listitem>
</varlistentry>
-<varlistentry id="ASIODNS_SENDSOCK">
-<term>ASIODNS_SENDSOCK error %1 sending data using %2 to %3(%4)</term>
+<varlistentry id="ASIODNS_SEND_DATA">
+<term>ASIODNS_SEND_DATA error %1 sending data using %2 to %3(%4)</term>
+<listitem><para>
+The asynchronous I/O code encountered an error when trying to send data to
+the specified address on the given protocol. The number of the system
+error that caused the problem is given in the message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ASIODNS_UNKNOWN_ORIGIN">
+<term>ASIODNS_UNKNOWN_ORIGIN unknown origin for ASIO error code %1 (protocol: %2, address %3)</term>
+<listitem><para>
+An internal consistency check on the origin of a message from the
+asynchronous I/O module failed. This may indicate an internal error;
+please submit a bug report.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ASIODNS_UNKNOWN_RESULT">
+<term>ASIODNS_UNKNOWN_RESULT unknown result (%1) when IOFetch::stop() was executed for I/O to %2(%3)</term>
+<listitem><para>
+An internal error indicating that the termination method of the resolver's
+upstream fetch class was called with an unknown result code (which is
+given in the message). Please submit a bug report.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_AXFR_ERROR">
+<term>AUTH_AXFR_ERROR error handling AXFR request: %1</term>
+<listitem><para>
+This is a debug message produced by the authoritative server when it
+has encountered an error processing an AXFR request. The message gives
+the reason for the error, and the server will return a SERVFAIL code to
+the sender.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_AXFR_UDP">
+<term>AUTH_AXFR_UDP AXFR query received over UDP</term>
+<listitem><para>
+This is a debug message output when the authoritative server has received
+an AXFR query over UDP. Use of UDP for AXFRs is not permitted by the
+protocol, so the server will return a FORMERR error to the sender.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_COMMAND_FAILED">
+<term>AUTH_COMMAND_FAILED execution of command channel instruction '%1' failed: %2</term>
+<listitem><para>
+Execution of the specified command by the authoritative server failed. The
+message contains the reason for the failure.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_CONFIG_CHANNEL_CREATED">
+<term>AUTH_CONFIG_CHANNEL_CREATED configuration session channel created</term>
+<listitem><para>
+This is a debug message indicating that the authoritative server has created
+the channel to the configuration manager. It is issued during server
+startup and is an indication that the initialization is proceeding normally.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_CONFIG_CHANNEL_ESTABLISHED">
+<term>AUTH_CONFIG_CHANNEL_ESTABLISHED configuration session channel established</term>
+<listitem><para>
+This is a debug message indicating that the authoritative server
+has established communication with the configuration manager over the
+previously-created channel. It is issued during server startup and is an
+indication that the initialization is proceeding normally.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_CONFIG_CHANNEL_STARTED">
+<term>AUTH_CONFIG_CHANNEL_STARTED configuration session channel started</term>
+<listitem><para>
+This is a debug message, issued when the authoritative server has
+posted a request to be notified when new configuration information is
+available. It is issued during server startup and is an indication that
+the initialization is proceeding normally.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_CONFIG_LOAD_FAIL">
+<term>AUTH_CONFIG_LOAD_FAIL load of configuration failed: %1</term>
+<listitem><para>
+An attempt to configure the server with information from the configuration
+database during the startup sequence has failed. (The reason for
+the failure is given in the message.) The server will continue its
+initialization although it may not be configured in the desired way.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_CONFIG_UPDATE_FAIL">
+<term>AUTH_CONFIG_UPDATE_FAIL update of configuration failed: %1</term>
+<listitem><para>
+An attempt to update the configuration of the server with information
+from the configuration database has failed; the reason is given in
+the message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_DATA_SOURCE">
+<term>AUTH_DATA_SOURCE data source database file: %1</term>
+<listitem><para>
+This is a debug message produced by the authoritative server when it accesses a
+database data source, listing the file that is being accessed.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_DNS_SERVICES_CREATED">
+<term>AUTH_DNS_SERVICES_CREATED DNS services created</term>
+<listitem><para>
+This is a debug message indicating that the component that will be handling
+incoming queries for the authoritative server (DNSServices) has been
+successfully created. It is issued during server startup and is an indication
+that the initialization is proceeding normally.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_HEADER_PARSE_FAIL">
+<term>AUTH_HEADER_PARSE_FAIL unable to parse header in received DNS packet: %1</term>
+<listitem><para>
+This is a debug message, generated by the authoritative server when an
+attempt to parse the header of a received DNS packet has failed. (The
+reason for the failure is given in the message.) The server will drop the
+packet.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_LOAD_TSIG">
+<term>AUTH_LOAD_TSIG loading TSIG keys</term>
+<listitem><para>
+This is a debug message indicating that the authoritative server
+has requested the keyring holding TSIG keys from the configuration
+database. It is issued during server startup and is an indication that the
+initialization is proceeding normally.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_LOAD_ZONE">
+<term>AUTH_LOAD_ZONE loaded zone %1/%2</term>
+<listitem><para>
+This debug message is issued during the processing of the 'loadzone' command
+when the authoritative server has successfully loaded the named zone of the
+named class.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_MEM_DATASRC_DISABLED">
+<term>AUTH_MEM_DATASRC_DISABLED memory data source is disabled for class %1</term>
+<listitem><para>
+This is a debug message reporting that the authoritative server has
+discovered that the memory data source is disabled for the given class.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_MEM_DATASRC_ENABLED">
+<term>AUTH_MEM_DATASRC_ENABLED memory data source is enabled for class %1</term>
+<listitem><para>
+This is a debug message reporting that the authoritative server has
+discovered that the memory data source is enabled for the given class.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_NOTIFY_QUESTIONS">
+<term>AUTH_NOTIFY_QUESTIONS invalid number of questions (%1) in incoming NOTIFY</term>
+<listitem><para>
+This debug message is logged by the authoritative server when it receives
+a NOTIFY packet that contains zero or more than one question. (A valid
+NOTIFY packet contains one question.) The server will return a FORMERR
+error to the sender.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_NOTIFY_RRTYPE">
+<term>AUTH_NOTIFY_RRTYPE invalid question RR type (%1) in incoming NOTIFY</term>
+<listitem><para>
+This debug message is logged by the authoritative server when it receives
+a NOTIFY packet that contains an RR type other than SOA in the
+question section. (The RR type received is included in the message.) The
+server will return a FORMERR error to the sender.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_NO_STATS_SESSION">
+<term>AUTH_NO_STATS_SESSION session interface for statistics is not available</term>
+<listitem><para>
+The authoritative server had no session with the statistics module at the
+time it attempted to send it data: the attempt has been abandoned. This
+could be an error in configuration.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_NO_XFRIN">
+<term>AUTH_NO_XFRIN received NOTIFY but XFRIN session is not running</term>
+<listitem><para>
+This is a debug message produced by the authoritative server when it receives
+a NOTIFY packet but the XFRIN process is not running. The packet will be
+dropped and nothing returned to the sender.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_PACKET_PARSE_ERROR">
+<term>AUTH_PACKET_PARSE_ERROR unable to parse received DNS packet: %1</term>
+<listitem><para>
+This is a debug message, generated by the authoritative server when an
+attempt to parse a received DNS packet has failed due to something other
+than a protocol error. The reason for the failure is given in the message;
+the server will return a SERVFAIL error code to the sender.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_PACKET_PROTOCOL_ERROR">
+<term>AUTH_PACKET_PROTOCOL_ERROR DNS packet protocol error: %1. Returning %2</term>
+<listitem><para>
+This is a debug message, generated by the authoritative server when an
+attempt to parse a received DNS packet has failed due to a protocol error.
+The reason for the failure is given in the message, as is the error code
+that will be returned to the sender.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_PACKET_RECEIVED">
+<term>AUTH_PACKET_RECEIVED message received:\n%1</term>
+<listitem><para>
+This is a debug message output by the authoritative server when it
+receives a valid DNS packet.
+</para><para>
+Note: This message includes the packet received, rendered in the form of
+multiple lines of text. For this reason, it is suggested that this log message
+not be routed to the syslog file, where the multiple lines could confuse
+programs that expect a format of one message per line.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_PROCESS_FAIL">
+<term>AUTH_PROCESS_FAIL message processing failure: %1</term>
+<listitem><para>
+This message is generated by the authoritative server when it has
+encountered an internal error whilst processing a received packet:
+the cause of the error is included in the message.
+</para><para>
+The server will return a SERVFAIL error code to the sender of the packet.
+This message indicates a potential error in the server. Please open a
+bug ticket for this issue.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_RECEIVED_COMMAND">
+<term>AUTH_RECEIVED_COMMAND command '%1' received</term>
+<listitem><para>
+This is a debug message issued when the authoritative server has received
+a command on the command channel.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_RECEIVED_SENDSTATS">
+<term>AUTH_RECEIVED_SENDSTATS command 'sendstats' received</term>
+<listitem><para>
+This is a debug message issued when the authoritative server has received
+a command from the statistics module to send it data. The 'sendstats'
+command is handled differently to other commands, which is why the debug
+message associated with it has its own code.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_RESPONSE_RECEIVED">
+<term>AUTH_RESPONSE_RECEIVED received response message, ignoring</term>
+<listitem><para>
+This is a debug message; it is output if the authoritative server
+receives a DNS packet with the QR bit set, i.e. a DNS response. The
+server ignores the packet as it only responds to question packets.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_SEND_ERROR_RESPONSE">
+<term>AUTH_SEND_ERROR_RESPONSE sending an error response (%1 bytes):\n%2</term>
+<listitem><para>
+This is a debug message recording that the authoritative server is sending
+an error response to the originator of the query. A previous message will
+have recorded details of the failure.
+</para><para>
+Note: This message includes the packet sent, rendered in the form of
+multiple lines of text. For this reason, it is suggested that this log message
+not be routed to the syslog file, where the multiple lines could confuse
+programs that expect a format of one message per line.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_SEND_NORMAL_RESPONSE">
+<term>AUTH_SEND_NORMAL_RESPONSE sending an error response (%1 bytes):\n%2</term>
+<listitem><para>
+This is a debug message recording that the authoritative server is sending
+a response to the originator of a query.
+</para><para>
+Note: This message includes the packet sent, rendered in the form of
+multiple lines of text. For this reason, it is suggested that this log message
+not be routed to the syslog file, where the multiple lines could confuse
+programs that expect a format of one message per line.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_SERVER_CREATED">
+<term>AUTH_SERVER_CREATED server created</term>
+<listitem><para>
+An informational message indicating that the authoritative server process has
+been created and is initializing. The AUTH_SERVER_STARTED message will be
+output when initialization has successfully completed and the server starts
+accepting queries.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_SERVER_FAILED">
+<term>AUTH_SERVER_FAILED server failed: %1</term>
+<listitem><para>
+The authoritative server has encountered a fatal error and is terminating. The
+reason for the failure is included in the message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_SERVER_STARTED">
+<term>AUTH_SERVER_STARTED server started</term>
+<listitem><para>
+Initialization of the authoritative server has completed successfully
+and it is entering the main loop, waiting for queries to arrive.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_SQLITE3">
+<term>AUTH_SQLITE3 nothing to do for loading sqlite3</term>
+<listitem><para>
+This is a debug message indicating that the authoritative server has
+found that the data source it is loading is an SQLite3 data source,
+so no further validation is needed.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_STATS_CHANNEL_CREATED">
+<term>AUTH_STATS_CHANNEL_CREATED STATS session channel created</term>
+<listitem><para>
+This is a debug message indicating that the authoritative server has
+created a channel to the statistics process. It is issued during server
+startup and is an indication that the initialization is proceeding normally.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_STATS_CHANNEL_ESTABLISHED">
+<term>AUTH_STATS_CHANNEL_ESTABLISHED STATS session channel established</term>
+<listitem><para>
+This is a debug message indicating that the authoritative server
+has established communication over the previously created statistics
+channel. It is issued during server startup and is an indication that the
+initialization is proceeding normally.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_STATS_COMMS">
+<term>AUTH_STATS_COMMS communication error in sending statistics data: %1</term>
+<listitem><para>
+An error was encountered when the authoritative server tried to send data
+to the statistics daemon. The message includes additional information
+describing the reason for the failure.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_STATS_TIMEOUT">
+<term>AUTH_STATS_TIMEOUT timeout while sending statistics data: %1</term>
+<listitem><para>
+The authoritative server sent data to the statistics daemon but received
+no acknowledgement within the specified time. The message includes
+additional information describing the reason for the failure.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_STATS_TIMER_DISABLED">
+<term>AUTH_STATS_TIMER_DISABLED statistics timer has been disabled</term>
+<listitem><para>
+This is a debug message indicating that the statistics timer has been
+disabled in the authoritative server and no statistics information is
+being produced.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_STATS_TIMER_SET">
+<term>AUTH_STATS_TIMER_SET statistics timer set to %1 second(s)</term>
+<listitem><para>
+This is a debug message indicating that the statistics timer has been
+enabled and that the authoritative server will produce statistics data
+at the specified interval.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_UNSUPPORTED_OPCODE">
+<term>AUTH_UNSUPPORTED_OPCODE unsupported opcode: %1</term>
+<listitem><para>
+This is a debug message, produced when a received DNS packet being
+processed by the authoritative server has been found to contain an
+unsupported opcode. (The opcode is included in the message.) The server
+will return an error code of NOTIMPL to the sender.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_XFRIN_CHANNEL_CREATED">
+<term>AUTH_XFRIN_CHANNEL_CREATED XFRIN session channel created</term>
+<listitem><para>
+This is a debug message indicating that the authoritative server has
+created a channel to the XFRIN (Transfer-in) process. It is issued
+during server startup and is an indication that the initialization is
+proceeding normally.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_XFRIN_CHANNEL_ESTABLISHED">
+<term>AUTH_XFRIN_CHANNEL_ESTABLISHED XFRIN session channel established</term>
+<listitem><para>
+This is a debug message indicating that the authoritative server has
+established communication over the previously-created channel to the
+XFRIN (Transfer-in) process. It is issued during server startup and is an
+indication that the initialization is proceeding normally.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_ZONEMGR_COMMS">
+<term>AUTH_ZONEMGR_COMMS error communicating with zone manager: %1</term>
+<listitem><para>
+This is a debug message output during the processing of a NOTIFY request.
+An error (listed in the message) has been encountered whilst communicating
+with the zone manager. The NOTIFY request will not be honored.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_ZONEMGR_ERROR">
+<term>AUTH_ZONEMGR_ERROR received error response from zone manager: %1</term>
+<listitem><para>
+This is a debug message output during the processing of a NOTIFY
+request. The zone manager component has been informed of the request,
+but has returned an error response (which is included in the message). The
+NOTIFY request will not be honored.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_CHECK_MSGQ_ALREADY_RUNNING">
+<term>BIND10_CHECK_MSGQ_ALREADY_RUNNING checking if msgq is already running</term>
+<listitem><para>
+The boss process is starting up and will now check if the message bus
+daemon is already running. If so, it will not be able to start, as it
+needs a dedicated message bus.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_CONFIGURATION_START_AUTH">
+<term>BIND10_CONFIGURATION_START_AUTH start authoritative server: %1</term>
+<listitem><para>
+This message shows whether or not the authoritative server should be
+started according to the configuration.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_CONFIGURATION_START_RESOLVER">
+<term>BIND10_CONFIGURATION_START_RESOLVER start resolver: %1</term>
+<listitem><para>
+This message shows whether or not the resolver should be
+started according to the configuration.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_INVALID_USER">
+<term>BIND10_INVALID_USER invalid user: %1</term>
+<listitem><para>
+The boss process was started with the -u option, to drop root privileges
+and continue running as the specified user, but the user is unknown.
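+For example, if the boss process is started with "-u bind" (a hypothetical
+user name), the account "bind" must already exist on the host for the
+privilege drop to succeed.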
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_KILLING_ALL_PROCESSES">
+<term>BIND10_KILLING_ALL_PROCESSES killing all started processes</term>
+<listitem><para>
+The boss module was not able to start every process it needed to start
+during startup, and will now kill the processes that did get started.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_KILL_PROCESS">
+<term>BIND10_KILL_PROCESS killing process %1</term>
+<listitem><para>
+The boss module is sending a kill signal to the process with the given name,
+as part of the process of killing all started processes during a failed
+startup, as described for BIND10_KILLING_ALL_PROCESSES.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_MSGQ_ALREADY_RUNNING">
+<term>BIND10_MSGQ_ALREADY_RUNNING msgq daemon already running, cannot start</term>
+<listitem><para>
+There already appears to be a message bus daemon running. Either an
+old process was not shut down correctly, and needs to be killed, or
+another instance of BIND10, with the same msgq domain socket, is
+running, which needs to be stopped.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_MSGQ_DAEMON_ENDED">
+<term>BIND10_MSGQ_DAEMON_ENDED b10-msgq process died, shutting down</term>
+<listitem><para>
+The message bus daemon has died. This is a fatal error, since it may
+leave the system in an inconsistent state. BIND10 will now shut down.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_MSGQ_DISAPPEARED">
+<term>BIND10_MSGQ_DISAPPEARED msgq channel disappeared</term>
+<listitem><para>
+While the boss module was listening for messages, the message bus channel
+suddenly disappeared. The msgq daemon may have died. This might lead to an
+inconsistent state of the system, and BIND 10 will now shut down.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_PROCESS_ENDED_NO_EXIT_STATUS">
+<term>BIND10_PROCESS_ENDED_NO_EXIT_STATUS process %1 (PID %2) died: exit status not available</term>
+<listitem><para>
+The given process ended unexpectedly, but no exit status is
+available. See BIND10_PROCESS_ENDED_WITH_EXIT_STATUS for a longer
+description.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_PROCESS_ENDED_WITH_EXIT_STATUS">
+<term>BIND10_PROCESS_ENDED_WITH_EXIT_STATUS process %1 (PID %2) terminated, exit status = %3</term>
+<listitem><para>
+The given process ended unexpectedly with the given exit status.
+Depending on which module it was, it may simply be restarted, or it
+may be a problem that will cause the boss module to shut down too.
+The latter happens if it was the message bus daemon, which, if it has
+died suddenly, may leave the system in an inconsistent state. BIND10
+will also shut down now if it has been run with --brittle.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_READING_BOSS_CONFIGURATION">
+<term>BIND10_READING_BOSS_CONFIGURATION reading boss configuration</term>
+<listitem><para>
+The boss process is starting up, and will now process the initial
+configuration, as received from the configuration manager.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_RECEIVED_COMMAND">
+<term>BIND10_RECEIVED_COMMAND received command: %1</term>
+<listitem><para>
+The boss module received a command and shall now process it. The command
+is printed.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_RECEIVED_NEW_CONFIGURATION">
+<term>BIND10_RECEIVED_NEW_CONFIGURATION received new configuration: %1</term>
+<listitem><para>
+The boss module received a configuration update and is going to apply
+it now. The new configuration is printed.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_RECEIVED_SIGNAL">
+<term>BIND10_RECEIVED_SIGNAL received signal %1</term>
+<listitem><para>
+The boss module received the given signal.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_RESURRECTED_PROCESS">
+<term>BIND10_RESURRECTED_PROCESS resurrected %1 (PID %2)</term>
+<listitem><para>
+The given process has been restarted successfully, and is now running
+with the given process id.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_RESURRECTING_PROCESS">
+<term>BIND10_RESURRECTING_PROCESS resurrecting dead %1 process...</term>
+<listitem><para>
+The given process has ended unexpectedly and is now being restarted.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_SELECT_ERROR">
+<term>BIND10_SELECT_ERROR error in select() call: %1</term>
+<listitem><para>
+There was a fatal error in the call to select(), used to see if a child
+process has ended or if there is a message on the message bus. This
+should not happen under normal circumstances and is considered fatal,
+so BIND 10 will now shut down. The specific error is printed.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_SEND_SIGKILL">
+<term>BIND10_SEND_SIGKILL sending SIGKILL to %1 (PID %2)</term>
+<listitem><para>
+The boss module is sending a SIGKILL signal to the given process.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_SEND_SIGTERM">
+<term>BIND10_SEND_SIGTERM sending SIGTERM to %1 (PID %2)</term>
+<listitem><para>
+The boss module is sending a SIGTERM signal to the given process.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_SHUTDOWN">
+<term>BIND10_SHUTDOWN stopping the server</term>
+<listitem><para>
+The boss process received a command or signal telling it to shut down.
+It will send a shutdown command to each process. The processes that do
+not shut down will then receive a SIGTERM signal; if that does not work,
+SIGKILL signals will be sent to the processes that are still alive.
+</para></listitem>
+</varlistentry>
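+
+<para>
+The escalation order can be pictured with the following sketch. This is
+illustrative Python only, not the actual boss code; the
+send_shutdown_command callable and the grace period are assumptions.
+</para>
+<programlisting><![CDATA[
+# Illustrative only: the shutdown escalation described above
+# (shutdown command, then SIGTERM, then SIGKILL). Not the boss code;
+# send_shutdown_command and the grace period are assumptions.
+import os
+import signal
+import time
+
+def still_running(pid):
+    try:
+        os.kill(pid, 0)          # signal 0 only checks that the PID exists
+        return True
+    except OSError:
+        return False
+
+def stop_processes(procs, send_shutdown_command, grace=2.0):
+    """procs maps process names to PIDs."""
+    for name in procs:
+        send_shutdown_command(name)          # polite request first
+    time.sleep(grace)
+    for name, pid in procs.items():
+        if still_running(pid):
+            os.kill(pid, signal.SIGTERM)     # BIND10_SEND_SIGTERM
+    time.sleep(grace)
+    for name, pid in procs.items():
+        if still_running(pid):
+            os.kill(pid, signal.SIGKILL)     # BIND10_SEND_SIGKILL
+]]></programlisting>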
+
+<varlistentry id="BIND10_SHUTDOWN_COMPLETE">
+<term>BIND10_SHUTDOWN_COMPLETE all processes ended, shutdown complete</term>
+<listitem><para>
+All child processes have been stopped, and the boss process will now
+stop itself.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_SOCKCREATOR_BAD_CAUSE">
+<term>BIND10_SOCKCREATOR_BAD_CAUSE unknown error cause from socket creator: %1</term>
+<listitem><para>
+The socket creator reported an error when creating a socket, but the
+function that failed is unknown (not one of 'S' for socket or 'B' for bind).
+</para></listitem>
+</varlistentry>
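+
+<para>
+As an illustration of the two known cause codes, a minimal sketch follows.
+This is illustrative Python only, not the protocol parser used by the boss.
+</para>
+<programlisting><![CDATA[
+# Illustrative sketch only: mapping the error cause byte reported by the
+# socket creator to a description. Only 'S' (socket) and 'B' (bind) are
+# documented above; anything else triggers BIND10_SOCKCREATOR_BAD_CAUSE.
+def describe_cause(cause):
+    known = {b'S': "socket() call failed", b'B': "bind() call failed"}
+    return known.get(cause, "unknown error cause: %r" % (cause,))
+
+print(describe_cause(b'S'))    # socket() call failed
+print(describe_cause(b'X'))    # unknown error cause: b'X'
+]]></programlisting>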
+
+<varlistentry id="BIND10_SOCKCREATOR_BAD_RESPONSE">
+<term>BIND10_SOCKCREATOR_BAD_RESPONSE unknown response for socket request: %1</term>
+<listitem><para>
+The boss requested a socket from the creator, but the answer is unknown. This
+looks like a programmer error.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_SOCKCREATOR_CRASHED">
+<term>BIND10_SOCKCREATOR_CRASHED the socket creator crashed</term>
+<listitem><para>
+The socket creator terminated unexpectedly. It is not possible to restart it
+(because the boss already gave up root privileges), so the system is going
+to terminate.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_SOCKCREATOR_EOF">
+<term>BIND10_SOCKCREATOR_EOF eof while expecting data from socket creator</term>
+<listitem><para>
+There should be more data from the socket creator, but it closed the socket.
+It probably crashed.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_SOCKCREATOR_INIT">
+<term>BIND10_SOCKCREATOR_INIT initializing socket creator parser</term>
+<listitem><para>
+The boss module initializes routines for parsing the socket creator
+protocol.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_SOCKCREATOR_KILL">
+<term>BIND10_SOCKCREATOR_KILL killing the socket creator</term>
+<listitem><para>
+The socket creator is being terminated forcibly, by sending it SIGKILL.
+This should not normally happen.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_SOCKCREATOR_TERMINATE">
+<term>BIND10_SOCKCREATOR_TERMINATE terminating socket creator</term>
+<listitem><para>
+The boss module is sending a termination request to the socket creator.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_SOCKCREATOR_TRANSPORT_ERROR">
+<term>BIND10_SOCKCREATOR_TRANSPORT_ERROR transport error when talking to the socket creator: %1</term>
+<listitem><para>
+Sending data to or receiving data from the socket creator failed with the
+given error. The creator probably crashed, or some serious OS-level problem
+occurred, as the communication takes place only on the local host.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_SOCKET_CREATED">
+<term>BIND10_SOCKET_CREATED successfully created socket %1</term>
+<listitem><para>
+The socket creator successfully created and sent a requested socket; it has
+the given file descriptor number.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_SOCKET_ERROR">
+<term>BIND10_SOCKET_ERROR error on %1 call in the creator: %2/%3</term>
+<listitem><para>
+The socket creator failed to create the requested socket. It failed on the
+indicated OS API function with the given error.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_SOCKET_GET">
+<term>BIND10_SOCKET_GET requesting socket [%1]:%2 of type %3 from the creator</term>
+<listitem><para>
+The boss forwards a request for a socket to the socket creator.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_STARTED_PROCESS">
+<term>BIND10_STARTED_PROCESS started %1</term>
+<listitem><para>
+The given process has successfully been started.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_STARTED_PROCESS_PID">
+<term>BIND10_STARTED_PROCESS_PID started %1 (PID %2)</term>
+<listitem><para>
+The given process has successfully been started, and has the given PID.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_STARTING">
+<term>BIND10_STARTING starting BIND10: %1</term>
+<listitem><para>
+Informational message on startup that shows the full version.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_STARTING_PROCESS">
+<term>BIND10_STARTING_PROCESS starting process %1</term>
+<listitem><para>
+The boss module is starting the given process.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_STARTING_PROCESS_PORT">
+<term>BIND10_STARTING_PROCESS_PORT starting process %1 (to listen on port %2)</term>
+<listitem><para>
+The boss module is starting the given process, which will listen on the
+given port number.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_STARTING_PROCESS_PORT_ADDRESS">
+<term>BIND10_STARTING_PROCESS_PORT_ADDRESS starting process %1 (to listen on %2#%3)</term>
+<listitem><para>
+The boss module is starting the given process, which will listen on the
+given address and port number (written as &lt;address&gt;#&lt;port&gt;).
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_STARTUP_COMPLETE">
+<term>BIND10_STARTUP_COMPLETE BIND 10 started</term>
+<listitem><para>
+All modules have been successfully started, and BIND 10 is now running.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_STARTUP_ERROR">
+<term>BIND10_STARTUP_ERROR error during startup: %1</term>
+<listitem><para>
+There was a fatal error when BIND10 was trying to start. The error is
+shown, and BIND10 will now shut down.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_START_AS_NON_ROOT">
+<term>BIND10_START_AS_NON_ROOT starting %1 as a user, not root. This might fail.</term>
+<listitem><para>
+The given module is being started or restarted without root privileges.
+If the module needs these privileges, it may have problems starting.
+Note that this issue should be resolved by the pending 'socket-creator'
+process; once that has been implemented, modules should not need root
+privileges anymore. See tickets #800 and #801 for more information.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_STOP_PROCESS">
+<term>BIND10_STOP_PROCESS asking %1 to shut down</term>
+<listitem><para>
+The boss module is sending a shutdown command to the given module over
+the message channel.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_UNKNOWN_CHILD_PROCESS_ENDED">
+<term>BIND10_UNKNOWN_CHILD_PROCESS_ENDED unknown child pid %1 exited</term>
+<listitem><para>
+An unknown child process has exited. The PID is printed, but no further
+action will be taken by the boss process.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_ENTRY_MISSING_RRSET">
+<term>CACHE_ENTRY_MISSING_RRSET missing RRset to generate message for %1</term>
+<listitem><para>
+The cache tried to generate the complete answer message. It knows the structure
+of the message, but some of the RRsets to be put there are not in cache (they
+probably expired already). Therefore it pretends the message was not found.
+</para></listitem>
+</varlistentry>
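+
+<para>
+A minimal sketch of that behaviour follows. This is illustrative Python
+only; the skeleton and RRset-cache layout are assumptions, not the BIND 10
+data structures.
+</para>
+<programlisting><![CDATA[
+# Illustrative sketch: the cached message only stores keys of its RRsets;
+# if any of them has expired out of the RRset cache, the whole message is
+# treated as not found. Not the BIND 10 implementation.
+def build_answer(message_skeleton, rrset_cache):
+    rrsets = []
+    for key in message_skeleton["rrset_keys"]:
+        rrset = rrset_cache.get(key)
+        if rrset is None:
+            return None      # CACHE_ENTRY_MISSING_RRSET -> pretend not found
+        rrsets.append(rrset)
+    return rrsets
+]]></programlisting>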
+
+<varlistentry id="CACHE_LOCALZONE_FOUND">
+<term>CACHE_LOCALZONE_FOUND found entry with key %1 in local zone data</term>
+<listitem><para>
+Debug message, noting that the requested data was successfully found in the
+local zone data of the cache.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_LOCALZONE_UNKNOWN">
+<term>CACHE_LOCALZONE_UNKNOWN entry with key %1 not found in local zone data</term>
+<listitem><para>
+Debug message. The requested data was not found in the local zone data.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_LOCALZONE_UPDATE">
+<term>CACHE_LOCALZONE_UPDATE updating local zone element at key %1</term>
+<listitem><para>
+Debug message issued when there is an update to the local zone section of
+the cache.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_MESSAGES_DEINIT">
+<term>CACHE_MESSAGES_DEINIT deinitialized message cache</term>
+<listitem><para>
+Debug message. It is issued when the server deinitializes the message cache.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_MESSAGES_EXPIRED">
+<term>CACHE_MESSAGES_EXPIRED found an expired message entry for %1 in the message cache</term>
+<listitem><para>
+Debug message. The requested data was found in the message cache, but it
+has already expired. The cache therefore removes the entry and pretends it
+found nothing.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_MESSAGES_FOUND">
+<term>CACHE_MESSAGES_FOUND found a message entry for %1 in the message cache</term>
+<listitem><para>
+Debug message. The whole message was found in the cache, so it can be
+returned to the user without any further lookups.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_MESSAGES_INIT">
+<term>CACHE_MESSAGES_INIT initialized message cache for %1 messages of class %2</term>
+<listitem><para>
+Debug message issued when a new message cache is created. It lists the class
+of messages it can hold and the maximum size of the cache.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_MESSAGES_REMOVE">
+<term>CACHE_MESSAGES_REMOVE removing old instance of %1/%2/%3 first</term>
+<listitem><para>
+Debug message. This may follow CACHE_MESSAGES_UPDATE and indicates that,
+while updating, the old instance is being removed prior to inserting the
+new one.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_MESSAGES_UNCACHEABLE">
+<term>CACHE_MESSAGES_UNCACHEABLE not inserting uncacheable message %1/%2/%3</term>
+<listitem><para>
+Debug message, noting that the given message cannot be cached, because
+there is no SOA record in the message. See RFC 2308 section 5 for more
+information.
+</para></listitem>
+</varlistentry>
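+
+<para>
+A sketch of such a check follows. This is illustrative Python only; the
+message layout used here is an assumption, not the BIND 10 data structures.
+</para>
+<programlisting><![CDATA[
+# Illustrative cacheability check in the spirit of RFC 2308 section 5:
+# without an SOA record there is no negative-cache TTL, so the message is
+# not inserted into the cache. The dict layout is an assumption.
+def is_cacheable(message):
+    """message['authority'] is assumed to be a list of (name, rrtype, ttl)."""
+    return any(rrtype == "SOA" for _name, rrtype, _ttl in message["authority"])
+
+nxdomain_without_soa = {"authority": []}
+print(is_cacheable(nxdomain_without_soa))  # False -> CACHE_MESSAGES_UNCACHEABLE
+]]></programlisting>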
+
+<varlistentry id="CACHE_MESSAGES_UNKNOWN">
+<term>CACHE_MESSAGES_UNKNOWN no entry for %1 found in the message cache</term>
<listitem><para>
-The asynchronous I/O code encountered an error when trying send data to
-the specified address on the given protocol. The the number of the system
-error that cause the problem is given in the message.
+Debug message. The message cache didn't find any entry for the given key.
</para></listitem>
</varlistentry>
-<varlistentry id="ASIODNS_UNKORIGIN">
-<term>ASIODNS_UNKORIGIN unknown origin for ASIO error code %1 (protocol: %2, address %3)</term>
+<varlistentry id="CACHE_MESSAGES_UPDATE">
+<term>CACHE_MESSAGES_UPDATE updating message entry %1/%2/%3</term>
<listitem><para>
-This message should not appear and indicates an internal error if it does.
-Please enter a bug report.
+Debug message issued when the message cache is being updated with a new
+message. Either the old instance is removed or, if none is found, a new
+one is created.
</para></listitem>
</varlistentry>
-<varlistentry id="ASIODNS_UNKRESULT">
-<term>ASIODNS_UNKRESULT unknown result (%1) when IOFetch::stop() was executed for I/O to %2(%3)</term>
+<varlistentry id="CACHE_RESOLVER_DEEPEST">
+<term>CACHE_RESOLVER_DEEPEST looking up deepest NS for %1/%2</term>
<listitem><para>
-The termination method of the resolver's upstream fetch class was called with
-an unknown result code (which is given in the message). This message should
-not appear and may indicate an internal error. Please enter a bug report.
+Debug message. The resolver cache is looking up the deepest known nameserver,
+so the resolution doesn't have to start from the root.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_RESOLVER_INIT">
+<term>CACHE_RESOLVER_INIT initializing resolver cache for class %1</term>
+<listitem><para>
+Debug message. The resolver cache is being created for the given class.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_RESOLVER_INIT_INFO">
+<term>CACHE_RESOLVER_INIT_INFO initializing resolver cache for class %1</term>
+<listitem><para>
+Debug message. The resolver cache is being created for the given class.
+The only difference from CACHE_RESOLVER_INIT is the format of the
+information passed; otherwise it does the same.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_RESOLVER_LOCAL_MSG">
+<term>CACHE_RESOLVER_LOCAL_MSG message for %1/%2 found in local zone data</term>
+<listitem><para>
+Debug message. The resolver cache found a complete message for the user query
+in the zone data.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_RESOLVER_LOCAL_RRSET">
+<term>CACHE_RESOLVER_LOCAL_RRSET RRset for %1/%2 found in local zone data</term>
+<listitem><para>
+Debug message. The resolver cache found a requested RRset in the local zone
+data.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_RESOLVER_LOOKUP_MSG">
+<term>CACHE_RESOLVER_LOOKUP_MSG looking up message in resolver cache for %1/%2</term>
+<listitem><para>
+Debug message. The resolver cache is trying to find a message to answer the
+user query.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_RESOLVER_LOOKUP_RRSET">
+<term>CACHE_RESOLVER_LOOKUP_RRSET looking up RRset in resolver cache for %1/%2</term>
+<listitem><para>
+Debug message. The resolver cache is trying to find an RRset (a request
+which usually originates internally within the resolver).
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_RESOLVER_NO_QUESTION">
+<term>CACHE_RESOLVER_NO_QUESTION answer message for %1/%2 has empty question section</term>
+<listitem><para>
+The cache tried to fill the data it found into the response message, but
+discovered that the message contains no question section, which is invalid.
+This is likely a programming error; please submit a bug report.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_RESOLVER_UNKNOWN_CLASS_MSG">
+<term>CACHE_RESOLVER_UNKNOWN_CLASS_MSG no cache for class %1</term>
+<listitem><para>
+Debug message. While trying to lookup a message in the resolver cache, it was
+discovered there's no cache for this class at all. Therefore no message is
+found.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_RESOLVER_UNKNOWN_CLASS_RRSET">
+<term>CACHE_RESOLVER_UNKNOWN_CLASS_RRSET no cache for class %1</term>
+<listitem><para>
+Debug message. While trying to lookup an RRset in the resolver cache, it was
+discovered there's no cache for this class at all. Therefore no data is found.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_RESOLVER_UPDATE_MSG">
+<term>CACHE_RESOLVER_UPDATE_MSG updating message for %1/%2/%3</term>
+<listitem><para>
+Debug message. The resolver is updating a message in the cache.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_RESOLVER_UPDATE_RRSET">
+<term>CACHE_RESOLVER_UPDATE_RRSET updating RRset for %1/%2/%3</term>
+<listitem><para>
+Debug message. The resolver is updating an RRset in the cache.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_RESOLVER_UPDATE_UNKNOWN_CLASS_MSG">
+<term>CACHE_RESOLVER_UPDATE_UNKNOWN_CLASS_MSG no cache for class %1</term>
+<listitem><para>
+Debug message. While trying to insert a message into the cache, it was
+discovered that there's no cache for the class of the message. Therefore
+the message will not be cached.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_RESOLVER_UPDATE_UNKNOWN_CLASS_RRSET">
+<term>CACHE_RESOLVER_UPDATE_UNKNOWN_CLASS_RRSET no cache for class %1</term>
+<listitem><para>
+Debug message. While trying to insert an RRset into the cache, it was
+discovered that there's no cache for the class of the RRset. Therefore
+the RRset will not be cached.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_RRSET_EXPIRED">
+<term>CACHE_RRSET_EXPIRED found expired RRset %1/%2/%3</term>
+<listitem><para>
+Debug message. The requested data was found in the RRset cache. However, it is
+expired, so the cache removed it and is going to pretend nothing was found.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_RRSET_INIT">
+<term>CACHE_RRSET_INIT initializing RRset cache for %1 RRsets of class %2</term>
+<listitem><para>
+Debug message. An RRset cache that will hold at most the given number of
+RRsets of the given class is being created.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_RRSET_LOOKUP">
+<term>CACHE_RRSET_LOOKUP looking up %1/%2/%3 in RRset cache</term>
+<listitem><para>
+Debug message. The resolver is trying to look up data in the RRset cache.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_RRSET_NOT_FOUND">
+<term>CACHE_RRSET_NOT_FOUND no RRset found for %1/%2/%3</term>
+<listitem><para>
+Debug message which can follow CACHE_RRSET_LOOKUP. This means the data is not
+in the cache.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_RRSET_REMOVE_OLD">
+<term>CACHE_RRSET_REMOVE_OLD removing old RRset for %1/%2/%3 to make space for new one</term>
+<listitem><para>
+Debug message which can follow CACHE_RRSET_UPDATE. During the update, the cache
+removed an old instance of the RRset to replace it with the new one.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_RRSET_UNTRUSTED">
+<term>CACHE_RRSET_UNTRUSTED not replacing old RRset for %1/%2/%3, it has higher trust level</term>
+<listitem><para>
+Debug message which can follow CACHE_RRSET_UPDATE. The cache already holds
+the same RRset, but from a more trusted source, so the old one is kept and
+the new one is ignored.
+</para></listitem>
+</varlistentry>
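+
+<para>
+The comparison can be sketched as follows. This is illustrative Python
+only; the trust level names and ordering are assumptions, not the BIND 10
+definitions.
+</para>
+<programlisting><![CDATA[
+# Illustrative sketch of the trust check: an RRset already in the cache is
+# only replaced when the new one is at least as trusted. The level names
+# below are assumptions.
+TRUST = {"additional": 0, "answer": 1, "authority": 2, "local": 3}
+
+def maybe_replace(cache, key, new_rrset, new_trust):
+    old = cache.get(key)
+    if old is not None and TRUST[old["trust"]] > TRUST[new_trust]:
+        return False          # CACHE_RRSET_UNTRUSTED: keep the old RRset
+    cache[key] = {"rrset": new_rrset, "trust": new_trust}
+    return True               # CACHE_RRSET_UPDATE (possibly after REMOVE_OLD)
+]]></programlisting>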
+
+<varlistentry id="CACHE_RRSET_UPDATE">
+<term>CACHE_RRSET_UPDATE updating RRset %1/%2/%3 in the cache</term>
+<listitem><para>
+Debug message. The RRset cache is updating its data with the given RRset.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CC_ASYNC_READ_FAILED">
+<term>CC_ASYNC_READ_FAILED asynchronous read failed</term>
+<listitem><para>
+This marks a low-level error: the library tried to read data from the
+message queue daemon asynchronously, but the ASIO library returned an error.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CC_CONN_ERROR">
+<term>CC_CONN_ERROR error connecting to message queue (%1)</term>
+<listitem><para>
+It is impossible to reach the message queue daemon for the reason given.
+It is unlikely that the program logging this error will be able to continue
+running, as communication with the rest of BIND 10 is vital for its
+components.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CC_DISCONNECT">
+<term>CC_DISCONNECT disconnecting from message queue daemon</term>
+<listitem><para>
+The library is disconnecting from the message queue daemon. This debug message
+indicates that the program is trying to shut down gracefully.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CC_ESTABLISH">
+<term>CC_ESTABLISH trying to establish connection with message queue daemon at %1</term>
+<listitem><para>
+This debug message indicates that the command channel library is about to
+connect to the message queue daemon, which should be listening on the UNIX-domain
+socket listed in the output.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CC_ESTABLISHED">
+<term>CC_ESTABLISHED successfully connected to message queue daemon</term>
+<listitem><para>
+This debug message indicates that the connection was successfully made;
+this should follow CC_ESTABLISH.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CC_GROUP_RECEIVE">
+<term>CC_GROUP_RECEIVE trying to receive a message</term>
+<listitem><para>
+Debug message, noting that a message is expected to come over the command
+channel.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CC_GROUP_RECEIVED">
+<term>CC_GROUP_RECEIVED message arrived ('%1', '%2')</term>
+<listitem><para>
+Debug message, noting that we successfully received a message (its envelope
+and payload are listed). This follows CC_GROUP_RECEIVE, but might happen
+some time later, depending on whether we waited for it or just polled.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CC_GROUP_SEND">
+<term>CC_GROUP_SEND sending message '%1' to group '%2'</term>
+<listitem><para>
+Debug message, we're about to send a message over the command channel.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CC_INVALID_LENGTHS">
+<term>CC_INVALID_LENGTHS invalid length parameters (%1, %2)</term>
+<listitem><para>
+This happens when garbage comes over the command channel or some kind of
+confusion happens in the program. The data received from the socket do not
+make sense when interpreted as message lengths. The first value is the
+total length of the message; the second is the length of the header. The
+header and its length field (2 bytes) are counted in the total length.
+</para></listitem>
+</varlistentry>
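+
+<para>
+A sketch of the consistency check follows. This is illustrative Python
+only; apart from the 2-byte header length mentioned above, the field widths
+are assumptions, not the BIND 10 wire format.
+</para>
+<programlisting><![CDATA[
+# Illustrative sketch: the header plus its 2-byte length field must fit
+# inside the total length, otherwise the lengths are invalid. The 4-byte
+# total length field is an assumption.
+import struct
+
+def parse_lengths(data):
+    total_len, header_len = struct.unpack(">IH", data[:6])
+    if header_len + 2 > total_len:
+        raise ValueError("invalid length parameters (%d, %d)"
+                         % (total_len, header_len))
+    return total_len, header_len
+]]></programlisting>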
+
+<varlistentry id="CC_LENGTH_NOT_READY">
+<term>CC_LENGTH_NOT_READY length not ready</term>
+<listitem><para>
+There should be data representing the length of the message on the socket,
+but it is not there.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CC_NO_MESSAGE">
+<term>CC_NO_MESSAGE no message ready to be received yet</term>
+<listitem><para>
+The program polled for incoming messages, but there was no message waiting.
+This is a debug message which may happen only after CC_GROUP_RECEIVE.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CC_NO_MSGQ">
+<term>CC_NO_MSGQ unable to connect to message queue (%1)</term>
+<listitem><para>
+It isn't possible to connect to the message queue daemon, for the reason
+listed. It is unlikely that any program will be able to continue without
+this communication.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CC_READ_ERROR">
+<term>CC_READ_ERROR error reading data from command channel (%1)</term>
+<listitem><para>
+A low level error happened when the library tried to read data from the
+command channel socket. The reason is listed.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CC_READ_EXCEPTION">
+<term>CC_READ_EXCEPTION error reading data from command channel (%1)</term>
+<listitem><para>
+We received an exception while trying to read data from the command
+channel socket. The reason is listed.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CC_REPLY">
+<term>CC_REPLY replying to message from '%1' with '%2'</term>
+<listitem><para>
+Debug message, noting we're sending a response to the original message
+with the given envelope.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CC_SET_TIMEOUT">
+<term>CC_SET_TIMEOUT setting timeout to %1ms</term>
+<listitem><para>
+Debug message. The length of time that the program is willing to wait for
+a reply is being set.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CC_START_READ">
+<term>CC_START_READ starting asynchronous read</term>
+<listitem><para>
+Debug message. From now on, when a message (or command) comes, it will wake
+the program and the library will automatically pass it to the correct place.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CC_SUBSCRIBE">
+<term>CC_SUBSCRIBE subscribing to communication group %1</term>
+<listitem><para>
+Debug message. The program wants to receive messages addressed to this group.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CC_TIMEOUT">
+<term>CC_TIMEOUT timeout reading data from command channel</term>
+<listitem><para>
+The program waited too long for data from the command channel (usually
+because it sent a query to a different program and that program did not
+answer for whatever reason).
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CC_UNSUBSCRIBE">
+<term>CC_UNSUBSCRIBE unsubscribing from communication group %1</term>
+<listitem><para>
+Debug message. The program no longer wants to receive messages addressed to
+this group.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CC_WRITE_ERROR">
+<term>CC_WRITE_ERROR error writing data to command channel (%1)</term>
+<listitem><para>
+A low level error happened when the library tried to write data to the command
+channel socket.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CC_ZERO_LENGTH">
+<term>CC_ZERO_LENGTH invalid message length (0)</term>
+<listitem><para>
+The library received a message length of zero, which makes no sense, since
+all messages must contain at least the envelope.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CFGMGR_AUTOMATIC_CONFIG_DATABASE_UPDATE">
+<term>CFGMGR_AUTOMATIC_CONFIG_DATABASE_UPDATE Updating configuration database from version %1 to %2</term>
+<listitem><para>
+An older version of the configuration database has been found, from which
+there was an automatic upgrade path to the current version. These changes
+are now applied, and no action from the administrator is necessary.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CFGMGR_BAD_UPDATE_RESPONSE_FROM_MODULE">
+<term>CFGMGR_BAD_UPDATE_RESPONSE_FROM_MODULE Unable to parse response from module %1: %2</term>
+<listitem><para>
+The configuration manager sent a configuration update to a module, but
+the module responded with an answer that could not be parsed. The answer
+message appears to be invalid JSON data, or not decodable to a string.
+This is likely to be a problem in the module in question. The update is
+assumed to have failed, and will not be stored.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CFGMGR_CC_SESSION_ERROR">
+<term>CFGMGR_CC_SESSION_ERROR Error connecting to command channel: %1</term>
+<listitem><para>
+The configuration manager daemon was unable to connect to the messaging
+system. The most likely cause is that msgq is not running.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CFGMGR_DATA_READ_ERROR">
+<term>CFGMGR_DATA_READ_ERROR error reading configuration database from disk: %1</term>
+<listitem><para>
+There was a problem reading the persistent configuration data as stored
+on disk. The file may be corrupted, or it is of a version from where
+there is no automatic upgrade path. The file needs to be repaired or
+removed. The configuration manager daemon will now shut down.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CFGMGR_IOERROR_WHILE_WRITING_CONFIGURATION">
+<term>CFGMGR_IOERROR_WHILE_WRITING_CONFIGURATION Unable to write configuration file; configuration not stored: %1</term>
+<listitem><para>
+There was an IO error from the system while the configuration manager
+was trying to write the configuration database to disk. The specific
+error is given. The most likely cause is that the directory where
+the file is stored does not exist, or is not writable. The updated
+configuration is not stored.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CFGMGR_OSERROR_WHILE_WRITING_CONFIGURATION">
+<term>CFGMGR_OSERROR_WHILE_WRITING_CONFIGURATION Unable to write configuration file; configuration not stored: %1</term>
+<listitem><para>
+There was an OS error from the system while the configuration manager
+was trying to write the configuration database to disk. The specific
+error is given. The most likely cause is that the system does not have
+write access to the configuration database file. The updated
+configuration is not stored.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CFGMGR_STOPPED_BY_KEYBOARD">
+<term>CFGMGR_STOPPED_BY_KEYBOARD keyboard interrupt, shutting down</term>
+<listitem><para>
+There was a keyboard interrupt signal to stop the cfgmgr daemon. The
+daemon will now shut down.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CMDCTL_BAD_CONFIG_DATA">
+<term>CMDCTL_BAD_CONFIG_DATA error in config data: %1</term>
+<listitem><para>
+There was an error reading the updated configuration data. The specific
+error is printed.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CMDCTL_BAD_PASSWORD">
+<term>CMDCTL_BAD_PASSWORD bad password for user: %1</term>
+<listitem><para>
+A login attempt was made to b10-cmdctl, but the password was wrong.
+Users can be managed with the tool b10-cmdctl-usermgr.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CMDCTL_CC_SESSION_ERROR">
+<term>CMDCTL_CC_SESSION_ERROR error reading from cc channel: %1</term>
+<listitem><para>
+There was a problem reading from the command and control channel. The
+most likely cause is that the message bus daemon is not running.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CMDCTL_CC_SESSION_TIMEOUT">
+<term>CMDCTL_CC_SESSION_TIMEOUT timeout on cc channel</term>
+<listitem><para>
+A timeout occurred when waiting for essential data from the cc session.
+This usually occurs when b10-cfgmgr is not running or not responding.
+Since we are waiting for essential information, this is a fatal error,
+and the cmdctl daemon will now shut down.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CMDCTL_COMMAND_ERROR">
+<term>CMDCTL_COMMAND_ERROR error in command %1 to module %2: %3</term>
+<listitem><para>
+An error was encountered sending the given command to the given module.
+Either there was a communication problem with the module, or the module
+was not able to process the command, and sent back an error. The
+specific error is printed in the message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CMDCTL_COMMAND_SENT">
+<term>CMDCTL_COMMAND_SENT command '%1' to module '%2' was sent</term>
+<listitem><para>
+This debug message indicates that the given command has been sent to
+the given module.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CMDCTL_NO_SUCH_USER">
+<term>CMDCTL_NO_SUCH_USER username not found in user database: %1</term>
+<listitem><para>
+A login attempt was made to b10-cmdctl, but the username was not known.
+Users can be added with the tool b10-cmdctl-usermgr.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CMDCTL_NO_USER_ENTRIES_READ">
+<term>CMDCTL_NO_USER_ENTRIES_READ failed to read user information, all users will be denied</term>
+<listitem><para>
+The b10-cmdctl daemon was unable to find any user data in the user
+database file. Either it was unable to read the file (in which case
+this message follows a message CMDCTL_USER_DATABASE_READ_ERROR
+containing a specific error), or the file was empty. Users can be added
+with the tool b10-cmdctl-usermgr.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CMDCTL_SEND_COMMAND">
+<term>CMDCTL_SEND_COMMAND sending command %1 to module %2</term>
+<listitem><para>
+This debug message indicates that the given command is being sent to
+the given module.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CMDCTL_SSL_SETUP_FAILURE_USER_DENIED">
+<term>CMDCTL_SSL_SETUP_FAILURE_USER_DENIED failed to create an SSL connection (user denied): %1</term>
+<listitem><para>
+The user was denied because the SSL connection could not successfully
+be set up. The specific error is given in the log message. Possible
+causes may be that the SSL request itself was bad, or the local key or
+certificate file could not be read.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CMDCTL_STOPPED_BY_KEYBOARD">
+<term>CMDCTL_STOPPED_BY_KEYBOARD keyboard interrupt, shutting down</term>
+<listitem><para>
+There was a keyboard interrupt signal to stop the cmdctl daemon. The
+daemon will now shut down.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CMDCTL_UNCAUGHT_EXCEPTION">
+<term>CMDCTL_UNCAUGHT_EXCEPTION uncaught exception: %1</term>
+<listitem><para>
+The b10-cmdctl daemon encountered an uncaught exception and
+will now shut down. This is indicative of a programming error and
+should not happen under normal circumstances. The exception message
+is printed.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CMDCTL_USER_DATABASE_READ_ERROR">
+<term>CMDCTL_USER_DATABASE_READ_ERROR failed to read user database file %1: %2</term>
+<listitem><para>
+The b10-cmdctl daemon was unable to read the user database file. The
+file may be unreadable for the daemon, or it may be corrupted. In the
+latter case, it can be recreated with b10-cmdctl-usermgr. The specific
+error is printed in the log message.
</para></listitem>
</varlistentry>
@@ -148,65 +1561,128 @@ The message itself is ignored by this module.
<varlistentry id="CONFIG_CCSESSION_MSG_INTERNAL">
<term>CONFIG_CCSESSION_MSG_INTERNAL error handling CC session message: %1</term>
<listitem><para>
-There was an internal problem handling an incoming message on the
-command and control channel. An unexpected exception was thrown. This
-most likely points to an internal inconsistency in the module code. The
-exception message is appended to the log error, and the module will
-continue to run, but will not send back an answer.
+There was an internal problem handling an incoming message on the command
+and control channel. An unexpected exception was thrown, details of
+which are appended to the message. The module will continue to run,
+but will not send back an answer.
+</para><para>
+The most likely cause of this error is a programming error. Please raise
+a bug report.
</para></listitem>
</varlistentry>
-<varlistentry id="CONFIG_FOPEN_ERR">
-<term>CONFIG_FOPEN_ERR error opening %1: %2</term>
+<varlistentry id="CONFIG_GET_FAIL">
+<term>CONFIG_GET_FAIL error getting configuration from cfgmgr: %1</term>
<listitem><para>
-There was an error opening the given file.
+The configuration manager returned an error when this module requested
+the configuration. The full error message answer from the configuration
+manager is appended to the log error. The most likely cause is that
+the module is of a different (command specification) version than the
+running configuration manager.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CONFIG_GET_FAILED">
+<term>CONFIG_GET_FAILED error getting configuration from cfgmgr: %1</term>
+<listitem><para>
+The configuration manager returned an error response when the module
+requested its configuration. The full error message answer from the
+configuration manager is appended to the log error.
</para></listitem>
</varlistentry>
<varlistentry id="CONFIG_JSON_PARSE">
<term>CONFIG_JSON_PARSE JSON parse error in %1: %2</term>
<listitem><para>
-There was a parse error in the JSON file. The given file does not appear
+There was an error parsing the JSON file. The given file does not appear
to be in valid JSON format. Please verify that the filename is correct
and that the contents are valid JSON.
</para></listitem>
</varlistentry>
-<varlistentry id="CONFIG_MANAGER_CONFIG">
-<term>CONFIG_MANAGER_CONFIG error getting configuration from cfgmgr: %1</term>
+<varlistentry id="CONFIG_LOG_CONFIG_ERRORS">
+<term>CONFIG_LOG_CONFIG_ERRORS error(s) in logging configuration: %1</term>
<listitem><para>
-The configuration manager returned an error when this module requested
-the configuration. The full error message answer from the configuration
-manager is appended to the log error. The most likely cause is that
-the module is of a different (command specification) version than the
-running configuration manager.
+There was a logging configuration update, but the internal validator
+for logging configuration found that it contained errors. The errors
+are shown, and the update is ignored.
</para></listitem>
</varlistentry>
-<varlistentry id="CONFIG_MANAGER_MOD_SPEC">
-<term>CONFIG_MANAGER_MOD_SPEC module specification not accepted by cfgmgr: %1</term>
+<varlistentry id="CONFIG_LOG_EXPLICIT">
+<term>CONFIG_LOG_EXPLICIT will use logging configuration for explicitly-named logger %1</term>
<listitem><para>
-The module specification file for this module was rejected by the
-configuration manager. The full error message answer from the
-configuration manager is appended to the log error. The most likely
-cause is that the module is of a different (specification file) version
-than the running configuration manager.
+This is a debug message. When processing the "loggers" part of the
+configuration file, the configuration library found an entry for the named
+logger that matches the logger specification for the program. The logging
+configuration for the program will be updated with the information.
</para></listitem>
</varlistentry>
-<varlistentry id="CONFIG_MODULE_SPEC">
-<term>CONFIG_MODULE_SPEC module specification error in %1: %2</term>
+<varlistentry id="CONFIG_LOG_IGNORE_EXPLICIT">
+<term>CONFIG_LOG_IGNORE_EXPLICIT ignoring logging configuration for explicitly-named logger %1</term>
<listitem><para>
-The given file does not appear to be a valid specification file. Please
-verify that the filename is correct and that its contents are a valid
-BIND10 module specification.
+This is a debug message. When processing the "loggers" part of the
+configuration file, the configuration library found an entry for the
+named logger. As this does not match the logger specification for the
+program, it has been ignored.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CONFIG_LOG_IGNORE_WILD">
+<term>CONFIG_LOG_IGNORE_WILD ignoring logging configuration for wildcard logger %1</term>
+<listitem><para>
+This is a debug message. When processing the "loggers" part of the
+configuration file, the configuration library found the named wildcard
+entry (one containing the "*" character) that matched a logger already
+matched by an explicitly named entry. The configuration is ignored.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CONFIG_LOG_WILD_MATCH">
+<term>CONFIG_LOG_WILD_MATCH will use logging configuration for wildcard logger %1</term>
+<listitem><para>
+This is a debug message. When processing the "loggers" part of
+the configuration file, the configuration library found the named
+wildcard entry (one containing the "*" character) that matches a logger
+specification in the program. The logging configuration for the program
+will be updated with the information.
+</para></listitem>
+</varlistentry>
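+
+<para>
+The precedence described by the four CONFIG_LOG_* messages above can be
+sketched as follows. This is illustrative Python only, not the
+configuration library; the use of fnmatch for wildcard matching is an
+assumption.
+</para>
+<programlisting><![CDATA[
+# Illustrative sketch of the precedence: explicitly-named entries that
+# match one of the program's loggers are used first; wildcard ("*") entries
+# only apply to loggers not already matched explicitly. Not the BIND 10 code.
+import fnmatch
+
+def select_logger_config(program_loggers, configured):
+    """configured maps logger names (possibly containing '*') to options."""
+    result = {}
+    for name, options in configured.items():
+        if "*" not in name and name in program_loggers:
+            result[name] = options                   # CONFIG_LOG_EXPLICIT
+    for name, options in configured.items():
+        if "*" in name:
+            for logger in program_loggers:
+                if logger in result:
+                    continue                         # CONFIG_LOG_IGNORE_WILD
+                if fnmatch.fnmatch(logger, name):
+                    result[logger] = options         # CONFIG_LOG_WILD_MATCH
+    return result
+]]></programlisting>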
+
+<varlistentry id="CONFIG_MOD_SPEC_FORMAT">
+<term>CONFIG_MOD_SPEC_FORMAT module specification error in %1: %2</term>
+<listitem><para>
+The given file does not appear to be a valid specification file: details
+are included in the message. Please verify that the filename is correct
+and that its contents are a valid BIND10 module specification.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CONFIG_MOD_SPEC_REJECT">
+<term>CONFIG_MOD_SPEC_REJECT module specification rejected by cfgmgr: %1</term>
+<listitem><para>
+The specification file for this module was rejected by the configuration
+manager. The full error message answer from the configuration manager is
+appended to the log error. The most likely cause is that the module is of
+a different (specification file) version than the running configuration
+manager.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CONFIG_OPEN_FAIL">
+<term>CONFIG_OPEN_FAIL error opening %1: %2</term>
+<listitem><para>
+There was an error opening the given file. The reason for the failure
+is included in the message.
</para></listitem>
</varlistentry>
<varlistentry id="DATASRC_CACHE_CREATE">
<term>DATASRC_CACHE_CREATE creating the hotspot cache</term>
<listitem><para>
-Debug information that the hotspot cache was created at startup.
+This is a debug message issued during startup when the hotspot cache
+is created.
</para></listitem>
</varlistentry>
@@ -218,39 +1694,37 @@ Debug information. The hotspot cache is being destroyed.
</varlistentry>
<varlistentry id="DATASRC_CACHE_DISABLE">
-<term>DATASRC_CACHE_DISABLE disabling the cache</term>
+<term>DATASRC_CACHE_DISABLE disabling the hotspot cache</term>
<listitem><para>
-The hotspot cache is disabled from now on. It is not going to store
-information or return anything.
+A debug message issued when the hotspot cache is disabled.
</para></listitem>
</varlistentry>
<varlistentry id="DATASRC_CACHE_ENABLE">
-<term>DATASRC_CACHE_ENABLE enabling the cache</term>
+<term>DATASRC_CACHE_ENABLE enabling the hotspot cache</term>
<listitem><para>
-The hotspot cache is enabled from now on.
+A debug message issued when the hotspot cache is enabled.
</para></listitem>
</varlistentry>
<varlistentry id="DATASRC_CACHE_EXPIRED">
-<term>DATASRC_CACHE_EXPIRED the item '%1' is expired</term>
+<term>DATASRC_CACHE_EXPIRED item '%1' in the hotspot cache has expired</term>
<listitem><para>
-Debug information. There was an attempt to look up an item in the hotspot
-cache. And the item was actually there, but it was too old, so it was removed
-instead and nothing is reported (the external behaviour is the same as with
-CACHE_NOT_FOUND).
+A debug message issued when a hotspot cache lookup located the item but it
+had expired. The item was removed and the program proceeded as if the item
+had not been found.
</para></listitem>
</varlistentry>
<varlistentry id="DATASRC_CACHE_FOUND">
<term>DATASRC_CACHE_FOUND the item '%1' was found</term>
<listitem><para>
-Debug information. An item was successfully looked up in the hotspot cache.
+Debug information. An item was successfully located in the hotspot cache.
</para></listitem>
</varlistentry>
<varlistentry id="DATASRC_CACHE_FULL">
-<term>DATASRC_CACHE_FULL cache is full, dropping oldest</term>
+<term>DATASRC_CACHE_FULL hotspot cache is full, dropping oldest</term>
<listitem><para>
Debug information. After inserting an item into the hotspot cache, the
maximum number of items was exceeded, so the least recently used item will
@@ -259,39 +1733,39 @@ be dropped. This should be directly followed by CACHE_REMOVE.
</varlistentry>
<varlistentry id="DATASRC_CACHE_INSERT">
-<term>DATASRC_CACHE_INSERT inserting item '%1' into the cache</term>
+<term>DATASRC_CACHE_INSERT inserting item '%1' into the hotspot cache</term>
<listitem><para>
-Debug information. It means a new item is being inserted into the hotspot
+A debug message indicating that a new item is being inserted into the hotspot
cache.
</para></listitem>
</varlistentry>
<varlistentry id="DATASRC_CACHE_NOT_FOUND">
-<term>DATASRC_CACHE_NOT_FOUND the item '%1' was not found</term>
+<term>DATASRC_CACHE_NOT_FOUND the item '%1' was not found in the hotspot cache</term>
<listitem><para>
-Debug information. It was attempted to look up an item in the hotspot cache,
-but it is not there.
+A debug message issued when hotspot cache was searched for the specified
+item but it was not found.
</para></listitem>
</varlistentry>
<varlistentry id="DATASRC_CACHE_OLD_FOUND">
-<term>DATASRC_CACHE_OLD_FOUND older instance of cache item found, replacing</term>
+<term>DATASRC_CACHE_OLD_FOUND older instance of hotspot cache item '%1' found, replacing</term>
<listitem><para>
Debug information. While inserting an item into the hotspot cache, an older
-instance of an item with the same name was found. The old instance will be
-removed. This should be directly followed by CACHE_REMOVE.
+instance of an item with the same name was found; the old instance will be
+removed. This will be directly followed by CACHE_REMOVE.
</para></listitem>
</varlistentry>
<varlistentry id="DATASRC_CACHE_REMOVE">
-<term>DATASRC_CACHE_REMOVE removing '%1' from the cache</term>
+<term>DATASRC_CACHE_REMOVE removing '%1' from the hotspot cache</term>
<listitem><para>
Debug information. An item is being removed from the hotspot cache.
</para></listitem>
</varlistentry>
<varlistentry id="DATASRC_CACHE_SLOTS">
-<term>DATASRC_CACHE_SLOTS setting the cache size to '%1', dropping '%2' items</term>
+<term>DATASRC_CACHE_SLOTS setting the hotspot cache size to '%1', dropping '%2' items</term>
<listitem><para>
The maximum allowed number of items of the hotspot cache is set to the given
number. If there are too many, some of them will be dropped. The size of 0
@@ -299,11 +1773,109 @@ means no limit.
</para></listitem>
</varlistentry>
+<varlistentry id="DATASRC_DATABASE_FIND_ERROR">
+<term>DATASRC_DATABASE_FIND_ERROR error retrieving data from datasource %1: %2</term>
+<listitem><para>
+This was an internal error while reading data from a datasource. It can mean
+either that the specific data source implementation is not behaving correctly,
+or that the data it provides is invalid. The current search is aborted.
+The error message contains specific information about the error.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_DATABASE_FIND_RECORDS">
+<term>DATASRC_DATABASE_FIND_RECORDS looking in datasource %1 for record %2/%3</term>
+<listitem><para>
+Debug information. The database data source is looking up records with the given
+name and type in the database.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_DATABASE_FIND_TTL_MISMATCH">
+<term>DATASRC_DATABASE_FIND_TTL_MISMATCH TTL values differ in %1 for elements of %2/%3/%4, setting to %5</term>
+<listitem><para>
+The datasource backend provided resource records for the given RRset with
+different TTL values. The TTL of the RRset is set to the lowest value, which
+is printed in the log message.
+</para></listitem>
+</varlistentry>
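+
+<para>
+The adjustment amounts to taking the minimum TTL, roughly as in this
+sketch. This is illustrative Python only; the record layout is an
+assumption, not the datasource backend code.
+</para>
+<programlisting><![CDATA[
+# Illustrative sketch: when records of one RRset disagree on TTL, the
+# lowest value is used. Not the actual datasource backend code.
+def rrset_ttl(records):
+    """records is assumed to be a list of (rdata, ttl) tuples of one RRset."""
+    ttls = set(ttl for _rdata, ttl in records)
+    if len(ttls) > 1:
+        print("TTL values differ, using %d" % min(ttls))
+    return min(ttls)
+
+print(rrset_ttl([("192.0.2.1", 3600), ("192.0.2.2", 300)]))   # 300
+]]></programlisting>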
+
+<varlistentry id="DATASRC_DATABASE_FIND_UNCAUGHT_ERROR">
+<term>DATASRC_DATABASE_FIND_UNCAUGHT_ERROR uncaught general error retrieving data from datasource %1: %2</term>
+<listitem><para>
+There was an uncaught general exception while reading data from a datasource.
+This most likely points to a logic error in the code, and can be considered a
+bug. The current search is aborted. Specific information about the exception is
+printed in this error message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_DATABASE_FIND_UNCAUGHT_ISC_ERROR">
+<term>DATASRC_DATABASE_FIND_UNCAUGHT_ISC_ERROR uncaught error retrieving data from datasource %1: %2</term>
+<listitem><para>
+There was an uncaught ISC exception while reading data from a datasource. This
+most likely points to a logic error in the code, and can be considered a bug.
+The current search is aborted. Specific information about the exception is
+printed in this error message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_DATABASE_FOUND_DELEGATION">
+<term>DATASRC_DATABASE_FOUND_DELEGATION Found delegation at %2 in %1</term>
+<listitem><para>
+When searching for a domain, the program encountered a delegation to a
+different zone at the given domain name. It will return that delegation
+instead.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_DATABASE_FOUND_DELEGATION_EXACT">
+<term>DATASRC_DATABASE_FOUND_DELEGATION_EXACT Found delegation at %2 (exact match) in %1</term>
+<listitem><para>
+The program found the domain requested, but it is a delegation point to a
+different zone; therefore it is not authoritative for this domain name.
+It will return the NS record instead.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_DATABASE_FOUND_DNAME">
+<term>DATASRC_DATABASE_FOUND_DNAME Found DNAME at %2 in %1</term>
+<listitem><para>
+When searching for a domain, the program encountered a DNAME redirection to
+a different place in the domain space at the given domain name. It will
+return that instead.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_DATABASE_FOUND_NXDOMAIN">
+<term>DATASRC_DATABASE_FOUND_NXDOMAIN search in datasource %1 resulted in NXDOMAIN for %2/%3/%4</term>
+<listitem><para>
+The data returned by the database backend did not contain any data for the given
+domain name, class and type.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_DATABASE_FOUND_NXRRSET">
+<term>DATASRC_DATABASE_FOUND_NXRRSET search in datasource %1 resulted in NXRRSET for %2/%3/%4</term>
+<listitem><para>
+The data returned by the database backend contained data for the given domain
+name and class, but not for the given type.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_DATABASE_FOUND_RRSET">
+<term>DATASRC_DATABASE_FOUND_RRSET search in datasource %1 resulted in RRset %2</term>
+<listitem><para>
+The data returned by the database backend contained data for the given domain
+name, and it either matches the type or has a relevant type. The RRset that is
+returned is printed.
+</para></listitem>
+</varlistentry>
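+
+<para>
+The distinction between these outcomes (in particular NXDOMAIN versus
+NXRRSET) can be sketched as follows. This is illustrative Python with an
+assumed record layout, not the datasource code.
+</para>
+<programlisting><![CDATA[
+# Illustrative classification: NXDOMAIN when the name has no records at
+# all, NXRRSET when it has records but none of the requested type,
+# otherwise the matching RRset is returned.
+def classify(records_for_name, wanted_type):
+    """records_for_name is assumed to map RR types to lists of rdata."""
+    if not records_for_name:
+        return "NXDOMAIN"
+    if wanted_type not in records_for_name:
+        return "NXRRSET"
+    return ("RRSET", records_for_name[wanted_type])
+
+print(classify({}, "A"))                              # NXDOMAIN
+print(classify({"MX": ["10 mail.example."]}, "A"))    # NXRRSET
+]]></programlisting>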
+
<varlistentry id="DATASRC_DO_QUERY">
<term>DATASRC_DO_QUERY handling query for '%1/%2'</term>
<listitem><para>
-Debug information. We're processing some internal query for given name and
-type.
+A debug message indicating that a query for the given name and RR type is being
+processed.
</para></listitem>
</varlistentry>
@@ -317,8 +1889,9 @@ Debug information. An RRset is being added to the in-memory data source.
<varlistentry id="DATASRC_MEM_ADD_WILDCARD">
<term>DATASRC_MEM_ADD_WILDCARD adding wildcards for '%1'</term>
<listitem><para>
-Debug information. Some special marks above each * in wildcard name are needed.
-They are being added now for this name.
+This is a debug message issued during the processing of a wildcard
+name. The internal domain name tree is scanned and some nodes are
+specially marked to allow the wildcard lookup to succeed.
</para></listitem>
</varlistentry>
@@ -349,7 +1922,7 @@ returning the CNAME instead.
<term>DATASRC_MEM_CNAME_COEXIST can't add data to CNAME in domain '%1'</term>
<listitem><para>
This is the same problem as in MEM_CNAME_TO_NONEMPTY, but it happened the
-other way around -- adding some outher data to CNAME.
+other way around -- adding some other data to CNAME.
</para></listitem>
</varlistentry>
@@ -401,11 +1974,11 @@ Debug information. A DNAME was found instead of the requested information.
</varlistentry>
<varlistentry id="DATASRC_MEM_DNAME_NS">
-<term>DATASRC_MEM_DNAME_NS dNAME and NS can't coexist in non-apex domain '%1'</term>
+<term>DATASRC_MEM_DNAME_NS DNAME and NS can't coexist in non-apex domain '%1'</term>
<listitem><para>
-It was requested for DNAME and NS records to be put into the same domain
-which is not the apex (the top of the zone). This is forbidden by RFC
-2672, section 3. This indicates a problem with provided data.
+A request was made for DNAME and NS records to be put into the same
+domain which is not the apex (the top of the zone). This is forbidden
+by RFC 2672 (section 3) and indicates a problem with provided data.
</para></listitem>
</varlistentry>
@@ -457,8 +2030,8 @@ Debug information. The content of master file is being loaded into the memory.
</para></listitem>
</varlistentry>
-<varlistentry id="DATASRC_MEM_NOTFOUND">
-<term>DATASRC_MEM_NOTFOUND requested domain '%1' not found</term>
+<varlistentry id="DATASRC_MEM_NOT_FOUND">
+<term>DATASRC_MEM_NOT_FOUND requested domain '%1' not found</term>
<listitem><para>
Debug information. The requested domain does not exist.
</para></listitem>
@@ -544,7 +2117,7 @@ behaviour is specified by RFC 1034, section 4.3.3
</varlistentry>
<varlistentry id="DATASRC_MEM_WILDCARD_DNAME">
-<term>DATASRC_MEM_WILDCARD_DNAME dNAME record in wildcard domain '%1'</term>
+<term>DATASRC_MEM_WILDCARD_DNAME DNAME record in wildcard domain '%1'</term>
<listitem><para>
The software refuses to load DNAME records into a wildcard domain. It isn't
explicitly forbidden, but the protocol is ambiguous about how this should
@@ -554,7 +2127,7 @@ different tools.
</varlistentry>
<varlistentry id="DATASRC_MEM_WILDCARD_NS">
-<term>DATASRC_MEM_WILDCARD_NS nS record in wildcard domain '%1'</term>
+<term>DATASRC_MEM_WILDCARD_NS NS record in wildcard domain '%1'</term>
<listitem><para>
The software refuses to load NS records into a wildcard domain. It isn't
explicitly forbidden, but the protocol is ambiguous about how this should
@@ -566,15 +2139,15 @@ different tools.
<varlistentry id="DATASRC_META_ADD">
<term>DATASRC_META_ADD adding a data source into meta data source</term>
<listitem><para>
-Debug information. Yet another data source is being added into the meta data
-source. (probably at startup or reconfiguration)
+This is a debug message issued during startup or reconfiguration.
+Another data source is being added into the meta data source.
</para></listitem>
</varlistentry>
<varlistentry id="DATASRC_META_ADD_CLASS_MISMATCH">
<term>DATASRC_META_ADD_CLASS_MISMATCH mismatch between classes '%1' and '%2'</term>
<listitem><para>
-It was attempted to add a data source into a meta data source. But their
+It was attempted to add a data source into a meta data source, but their
classes do not match.
</para></listitem>
</varlistentry>
@@ -634,7 +2207,7 @@ information for it.
</varlistentry>
<varlistentry id="DATASRC_QUERY_CACHED">
-<term>DATASRC_QUERY_CACHED data for %1/%2 found in cache</term>
+<term>DATASRC_QUERY_CACHED data for %1/%2 found in hotspot cache</term>
<listitem><para>
Debug information. The requested data were found in the hotspot cache, so
no query is sent to the real data source.
@@ -642,7 +2215,7 @@ no query is sent to the real data source.
</varlistentry>
<varlistentry id="DATASRC_QUERY_CHECK_CACHE">
-<term>DATASRC_QUERY_CHECK_CACHE checking cache for '%1/%2'</term>
+<term>DATASRC_QUERY_CHECK_CACHE checking hotspot cache for '%1/%2'</term>
<listitem><para>
Debug information. While processing a query, lookup to the hotspot cache
is being made.
@@ -666,12 +2239,11 @@ way down to the given domain.
</varlistentry>
<varlistentry id="DATASRC_QUERY_EMPTY_CNAME">
-<term>DATASRC_QUERY_EMPTY_CNAME cNAME at '%1' is empty</term>
+<term>DATASRC_QUERY_EMPTY_CNAME CNAME at '%1' is empty</term>
<listitem><para>
-There was an CNAME and it was being followed. But it contains no records,
-so there's nowhere to go. There will be no answer. This indicates a problem
-with supplied data.
-We tried to follow
+A CNAME chain was being followed and an entry was found that pointed
+to a domain name that had no RRsets associated with it. As a result,
+the query cannot be answered. This indicates a problem with supplied data.
</para></listitem>
</varlistentry>
@@ -687,15 +2259,15 @@ DNAME is empty (it has no records). This indicates problem with supplied data.
<term>DATASRC_QUERY_FAIL query failed</term>
<listitem><para>
Some subtask of query processing failed. The reason should have been reported
-already. We are returning SERVFAIL.
+already and a SERVFAIL will be returned to the querying system.
</para></listitem>
</varlistentry>
<varlistentry id="DATASRC_QUERY_FOLLOW_CNAME">
<term>DATASRC_QUERY_FOLLOW_CNAME following CNAME at '%1'</term>
<listitem><para>
-Debug information. The domain is a CNAME (or a DNAME and we created a CNAME
-for it already), so it's being followed.
+Debug information. The domain is a CNAME (or a DNAME and a CNAME for it
+has already been created) and the search is following this chain.
</para></listitem>
</varlistentry>
@@ -744,14 +2316,14 @@ Debug information. The last DO_QUERY is an auth query.
<varlistentry id="DATASRC_QUERY_IS_GLUE">
<term>DATASRC_QUERY_IS_GLUE glue query (%1/%2)</term>
<listitem><para>
-Debug information. The last DO_QUERY is query for glue addresses.
+Debug information. The last DO_QUERY is a query for glue addresses.
</para></listitem>
</varlistentry>
<varlistentry id="DATASRC_QUERY_IS_NOGLUE">
<term>DATASRC_QUERY_IS_NOGLUE query for non-glue addresses (%1/%2)</term>
<listitem><para>
-Debug information. The last DO_QUERY is query for addresses that are not
+Debug information. The last DO_QUERY is a query for addresses that are not
glue.
</para></listitem>
</varlistentry>
@@ -759,7 +2331,7 @@ glue.
<varlistentry id="DATASRC_QUERY_IS_REF">
<term>DATASRC_QUERY_IS_REF query for referral (%1/%2)</term>
<listitem><para>
-Debug information. The last DO_QUERY is query for referral information.
+Debug information. The last DO_QUERY is a query for referral information.
</para></listitem>
</varlistentry>
@@ -806,7 +2378,7 @@ error already.
</varlistentry>
<varlistentry id="DATASRC_QUERY_NO_CACHE_ANY_AUTH">
-<term>DATASRC_QUERY_NO_CACHE_ANY_AUTH ignoring cache for ANY query (%1/%2 in %3 class)</term>
+<term>DATASRC_QUERY_NO_CACHE_ANY_AUTH ignoring hotspot cache for ANY query (%1/%2 in %3 class)</term>
<listitem><para>
Debug information. The hotspot cache is ignored for authoritative ANY queries
for consistency reasons.
@@ -814,7 +2386,7 @@ for consistency reasons.
</varlistentry>
<varlistentry id="DATASRC_QUERY_NO_CACHE_ANY_SIMPLE">
-<term>DATASRC_QUERY_NO_CACHE_ANY_SIMPLE ignoring cache for ANY query (%1/%2 in %3 class)</term>
+<term>DATASRC_QUERY_NO_CACHE_ANY_SIMPLE ignoring hotspot cache for ANY query (%1/%2 in %3 class)</term>
<listitem><para>
Debug information. The hotspot cache is ignored for ANY queries for consistency
reasons.
@@ -852,8 +2424,8 @@ Debug information. A sure query is being processed now.
</para></listitem>
</varlistentry>
-<varlistentry id="DATASRC_QUERY_PROVENX_FAIL">
-<term>DATASRC_QUERY_PROVENX_FAIL unable to prove nonexistence of '%1'</term>
+<varlistentry id="DATASRC_QUERY_PROVE_NX_FAIL">
+<term>DATASRC_QUERY_PROVE_NX_FAIL unable to prove nonexistence of '%1'</term>
<listitem><para>
The user wants DNSSEC and we discovered the entity doesn't exist (either
domain or the record). But there was an error getting NSEC/NSEC3 record
@@ -890,9 +2462,9 @@ error already.
<varlistentry id="DATASRC_QUERY_SYNTH_CNAME">
<term>DATASRC_QUERY_SYNTH_CNAME synthesizing CNAME from DNAME on '%1'</term>
<listitem><para>
-Debug information. While answering a query, a DNAME was met. The DNAME itself
-will be returned, but along with it a CNAME for clients which don't understand
-DNAMEs will be synthesized.
+This is a debug message. While answering a query, a DNAME was encountered. The
+DNAME itself will be returned, along with a synthesized CNAME for clients that
+do not understand the DNAME RR.
</para></listitem>
</varlistentry>
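
To make the synthesis concrete: the client's query name is rewritten by
replacing the suffix owned by the DNAME with the DNAME's target. The
following is a minimal C++ sketch of that rewriting, given only as an
illustration of the idea (it is not the BIND 10 data source code, which
operates on parsed label sequences rather than strings):

    #include <iostream>
    #include <string>

    // Rewrite a query name under a DNAME: the part of 'qname' below the
    // DNAME owner is kept and the owner suffix is replaced by the target,
    // e.g. www.example.com with (example.com DNAME example.net) becomes
    // www.example.net. Assumes qname really ends with dname_owner.
    std::string synthesizeCnameTarget(const std::string& qname,
                                      const std::string& dname_owner,
                                      const std::string& dname_target) {
        const std::string prefix =
            qname.substr(0, qname.size() - dname_owner.size());
        return prefix + dname_target;
    }

    int main() {
        std::cout << synthesizeCnameTarget("www.example.com", "example.com",
                                           "example.net") << std::endl;
        // prints www.example.net - the target of the synthesized CNAME
    }
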
@@ -905,7 +2477,7 @@ already. The code is 1 for error, 2 for not implemented.
</varlistentry>
<varlistentry id="DATASRC_QUERY_TOO_MANY_CNAMES">
-<term>DATASRC_QUERY_TOO_MANY_CNAMES cNAME chain limit exceeded at '%1'</term>
+<term>DATASRC_QUERY_TOO_MANY_CNAMES CNAME chain limit exceeded at '%1'</term>
<listitem><para>
A CNAME led to another CNAME and it led to another, and so on. After 16
CNAMEs, the software gave up. Long CNAME chains are discouraged, and this
@@ -938,8 +2510,8 @@ exact kind was hopefully already reported.
</para></listitem>
</varlistentry>
-<varlistentry id="DATASRC_QUERY_WILDCARD_PROVENX_FAIL">
-<term>DATASRC_QUERY_WILDCARD_PROVENX_FAIL unable to prove nonexistence of '%1' (%2)</term>
+<varlistentry id="DATASRC_QUERY_WILDCARD_PROVE_NX_FAIL">
+<term>DATASRC_QUERY_WILDCARD_PROVE_NX_FAIL unable to prove nonexistence of '%1' (%2)</term>
<listitem><para>
While processing a wildcard, it wasn't possible to prove nonexistence of the
given domain or record. The code is 1 for error and 2 for not implemented.
@@ -961,32 +2533,53 @@ Debug information. The SQLite data source is closing the database file.
</para></listitem>
</varlistentry>
+<varlistentry id="DATASRC_SQLITE_CONNCLOSE">
+<term>DATASRC_SQLITE_CONNCLOSE Closing sqlite database</term>
+<listitem><para>
+The database file is no longer needed and is being closed.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_SQLITE_CONNOPEN">
+<term>DATASRC_SQLITE_CONNOPEN Opening sqlite database file '%1'</term>
+<listitem><para>
+The database file is being opened so it can start providing data.
+</para></listitem>
+</varlistentry>
+
<varlistentry id="DATASRC_SQLITE_CREATE">
-<term>DATASRC_SQLITE_CREATE sQLite data source created</term>
+<term>DATASRC_SQLITE_CREATE SQLite data source created</term>
<listitem><para>
Debug information. An instance of SQLite data source is being created.
</para></listitem>
</varlistentry>
<varlistentry id="DATASRC_SQLITE_DESTROY">
-<term>DATASRC_SQLITE_DESTROY sQLite data source destroyed</term>
+<term>DATASRC_SQLITE_DESTROY SQLite data source destroyed</term>
<listitem><para>
Debug information. An instance of SQLite data source is being destroyed.
</para></listitem>
</varlistentry>
+<varlistentry id="DATASRC_SQLITE_DROPCONN">
+<term>DATASRC_SQLITE_DROPCONN SQLite3Database is being deinitialized</term>
+<listitem><para>
+The object around a database connection is being destroyed.
+</para></listitem>
+</varlistentry>
+
<varlistentry id="DATASRC_SQLITE_ENCLOSURE">
<term>DATASRC_SQLITE_ENCLOSURE looking for zone containing '%1'</term>
<listitem><para>
-Debug information. The SQLite data source is trying to identify, which zone
+Debug information. The SQLite data source is trying to identify which zone
should hold this domain.
</para></listitem>
</varlistentry>
-<varlistentry id="DATASRC_SQLITE_ENCLOSURE_NOTFOUND">
-<term>DATASRC_SQLITE_ENCLOSURE_NOTFOUND no zone contains it</term>
+<varlistentry id="DATASRC_SQLITE_ENCLOSURE_NOT_FOUND">
+<term>DATASRC_SQLITE_ENCLOSURE_NOT_FOUND no zone contains '%1'</term>
<listitem><para>
-Debug information. The last SQLITE_ENCLOSURE query was unsuccessful, there's
+Debug information. The last SQLITE_ENCLOSURE query was unsuccessful; there's
no such zone in our data.
</para></listitem>
</varlistentry>
@@ -1050,7 +2643,7 @@ a referral and where it goes.
<varlistentry id="DATASRC_SQLITE_FINDREF_BAD_CLASS">
<term>DATASRC_SQLITE_FINDREF_BAD_CLASS class mismatch looking for referral ('%1' and '%2')</term>
<listitem><para>
-The SQLite data source was trying to identify, if there's a referral. But
+The SQLite data source was trying to identify if there's a referral. But
it contains a different class than the query was for.
</para></listitem>
</varlistentry>
@@ -1079,6 +2672,13 @@ But it doesn't contain that zone.
</para></listitem>
</varlistentry>
+<varlistentry id="DATASRC_SQLITE_NEWCONN">
+<term>DATASRC_SQLITE_NEWCONN SQLite3Database is being initialized</term>
+<listitem><para>
+A wrapper object to hold the database connection is being initialized.
+</para></listitem>
+</varlistentry>
+
<varlistentry id="DATASRC_SQLITE_OPEN">
<term>DATASRC_SQLITE_OPEN opening SQLite database '%1'</term>
<listitem><para>
@@ -1090,15 +2690,22 @@ the provided file.
<varlistentry id="DATASRC_SQLITE_PREVIOUS">
<term>DATASRC_SQLITE_PREVIOUS looking for name previous to '%1'</term>
<listitem><para>
-Debug information. We're trying to look up name preceding the supplied one.
+This is a debug message. The name given was not found, so the program
+is searching for the next name higher up the hierarchy (e.g. if
+www.example.com were queried for and not found, the software searches
+for the "previous" name, example.com).
</para></listitem>
</varlistentry>
<varlistentry id="DATASRC_SQLITE_PREVIOUS_NO_ZONE">
<term>DATASRC_SQLITE_PREVIOUS_NO_ZONE no zone containing '%1'</term>
<listitem><para>
-The SQLite data source tried to identify name preceding this one. But this
-one is not contained in any zone in the data source.
+The name given was not found, so the program is searching for the next
+name higher up the hierarchy (e.g. if www.example.com were queried
+for and not found, the software searches for the "previous" name,
+example.com). However, this name is not contained in any zone in the
+data source. This is an error since it indicates a problem in the earlier
+processing of the query.
</para></listitem>
</varlistentry>
@@ -1111,11 +2718,11 @@ no data, but it will be ready for use.
</para></listitem>
</varlistentry>
-<varlistentry id="DATASRC_STATIC_BAD_CLASS">
-<term>DATASRC_STATIC_BAD_CLASS static data source can handle CH only</term>
+<varlistentry id="DATASRC_STATIC_CLASS_NOT_CH">
+<term>DATASRC_STATIC_CLASS_NOT_CH static data source can handle CH class only</term>
<listitem><para>
-For some reason, someone asked the static data source a query that is not in
-the CH class.
+An error message indicating that a query requesting an RR for a class other
+than CH was sent to the static data source (which only handles CH queries).
</para></listitem>
</varlistentry>
@@ -1143,294 +2750,436 @@ generated.
</para></listitem>
</varlistentry>
-<varlistentry id="LOGIMPL_ABOVEDBGMAX">
-<term>LOGIMPL_ABOVEDBGMAX debug level of %1 is too high and will be set to the maximum of %2</term>
+<varlistentry id="LOGIMPL_ABOVE_MAX_DEBUG">
+<term>LOGIMPL_ABOVE_MAX_DEBUG debug level of %1 is too high and will be set to the maximum of %2</term>
<listitem><para>
-A message from the underlying logger implementation code, the debug level
-(as set by the string DEBGUGn) is above the maximum allowed value and has
-been reduced to that value.
+A message from the interface to the underlying logger implementation reporting
+that the debug level (as set by an internally-created string DEBUGn, where n
+is an integer, e.g. DEBUG22) is above the maximum allowed value and has
+been reduced to that value. The appearance of this message may indicate
+a programming error - please submit a bug report.
</para></listitem>
</varlistentry>
-<varlistentry id="LOGIMPL_BADDEBUG">
-<term>LOGIMPL_BADDEBUG debug string is '%1': must be of the form DEBUGn</term>
+<varlistentry id="LOGIMPL_BAD_DEBUG_STRING">
+<term>LOGIMPL_BAD_DEBUG_STRING debug string '%1' has invalid format</term>
<listitem><para>
-The string indicating the extended logging level (used by the underlying
-logger implementation code) is not of the stated form. In particular,
-it starts DEBUG but does not end with an integer.
+A message from the interface to the underlying logger implementation
+reporting that an internally-created string used to set the debug level
+is not of the correct format (it should be of the form DEBUGn, where n
+is an integer, e.g. DEBUG22). The appearance of this message indicates
+a programming error - please submit a bug report.
</para></listitem>
</varlistentry>
-<varlistentry id="LOGIMPL_BELOWDBGMIN">
-<term>LOGIMPL_BELOWDBGMIN debug level of %1 is too low and will be set to the minimum of %2</term>
+<varlistentry id="LOGIMPL_BELOW_MIN_DEBUG">
+<term>LOGIMPL_BELOW_MIN_DEBUG debug level of %1 is too low and will be set to the minimum of %2</term>
<listitem><para>
-A message from the underlying logger implementation code, the debug level
-(as set by the string DEBGUGn) is below the minimum allowed value and has
-been increased to that value.
+A message from the interface to the underlying logger implementation reporting
+that the debug level (as set by an internally-created string DEBUGn, where n
+is an integer, e.g. DEBUG22) is below the minimum allowed value and has
+been increased to that value. The appearance of this message may indicate
+a programming error - please submit a bug report.
</para></listitem>
</varlistentry>
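
For context, the three LOGIMPL_ messages above all concern the internally
generated string DEBUGn. The sketch below shows the kind of parsing and
clamping being described; the bounds and the function name are illustrative
assumptions, not the actual logger implementation:

    #include <cstdlib>
    #include <iostream>
    #include <string>

    // Illustrative bounds only; the real limits are defined by the logger.
    const int MIN_DEBUG_LEVEL = 0;
    const int MAX_DEBUG_LEVEL = 99;

    // Extract the level from a string of the form DEBUGn and clamp it to
    // the allowed range - the clamping is where the "too high"/"too low"
    // messages would be reported.
    int debugLevelFromString(const std::string& s) {
        if (s.compare(0, 5, "DEBUG") != 0) {
            return MIN_DEBUG_LEVEL;   // malformed: LOGIMPL_BAD_DEBUG_STRING
        }
        int level = std::atoi(s.c_str() + 5);
        if (level > MAX_DEBUG_LEVEL) {
            level = MAX_DEBUG_LEVEL;  // LOGIMPL_ABOVE_MAX_DEBUG
        } else if (level < MIN_DEBUG_LEVEL) {
            level = MIN_DEBUG_LEVEL;  // LOGIMPL_BELOW_MIN_DEBUG
        }
        return level;
    }

    int main() {
        std::cout << debugLevelFromString("DEBUG22") << std::endl;   // 22
        std::cout << debugLevelFromString("DEBUG150") << std::endl;  // 99
    }
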
-<varlistentry id="MSG_BADDESTINATION">
-<term>MSG_BADDESTINATION unrecognized log destination: %1</term>
+<varlistentry id="LOG_BAD_DESTINATION">
+<term>LOG_BAD_DESTINATION unrecognized log destination: %1</term>
<listitem><para>
A logger destination value was given that was not recognized. The
destination should be one of "console", "file", or "syslog".
</para></listitem>
</varlistentry>
-<varlistentry id="MSG_BADSEVERITY">
-<term>MSG_BADSEVERITY unrecognized log severity: %1</term>
+<varlistentry id="LOG_BAD_SEVERITY">
+<term>LOG_BAD_SEVERITY unrecognized log severity: %1</term>
<listitem><para>
A logger severity value was given that was not recognized. The severity
-should be one of "DEBUG", "INFO", "WARN", "ERROR", or "FATAL".
+should be one of "DEBUG", "INFO", "WARN", "ERROR", "FATAL" or "NONE".
</para></listitem>
</varlistentry>
-<varlistentry id="MSG_BADSTREAM">
-<term>MSG_BADSTREAM bad log console output stream: %1</term>
+<varlistentry id="LOG_BAD_STREAM">
+<term>LOG_BAD_STREAM bad log console output stream: %1</term>
<listitem><para>
-A log console output stream was given that was not recognized. The
-output stream should be one of "stdout", or "stderr"
+Logging has been configured so that output is written to the terminal
+(console) but the stream on which it is to be written is not recognised.
+Allowed values are "stdout" and "stderr".
</para></listitem>
</varlistentry>
-<varlistentry id="MSG_DUPLNS">
-<term>MSG_DUPLNS line %1: duplicate $NAMESPACE directive found</term>
+<varlistentry id="LOG_DUPLICATE_MESSAGE_ID">
+<term>LOG_DUPLICATE_MESSAGE_ID duplicate message ID (%1) in compiled code</term>
<listitem><para>
-When reading a message file, more than one $NAMESPACE directive was found. In
-this version of the code, such a condition is regarded as an error and the
-read will be abandoned.
+During start-up, BIND 10 detected that the given message identification
+had been defined multiple times in the BIND 10 code. This indicates a
+programming error; please submit a bug report.
</para></listitem>
</varlistentry>
-<varlistentry id="MSG_DUPMSGID">
-<term>MSG_DUPMSGID duplicate message ID (%1) in compiled code</term>
+<varlistentry id="LOG_DUPLICATE_NAMESPACE">
+<term>LOG_DUPLICATE_NAMESPACE line %1: duplicate $NAMESPACE directive found</term>
<listitem><para>
-Indicative of a programming error, when it started up, BIND10 detected that
-the given message ID had been registered by one or more modules. (All message
-IDs should be unique throughout BIND10.) This has no impact on the operation
-of the server other that erroneous messages may be logged. (When BIND10 loads
-the message IDs (and their associated text), if a duplicate ID is found it is
-discarded. However, when the module that supplied the duplicate ID logs that
-particular message, the text supplied by the module that added the original
-ID will be output - something that may bear no relation to the condition being
-logged.
+When reading a message file, more than one $NAMESPACE directive was found.
+(This directive is used to set a C++ namespace when generating header
+files during software development.) Such a condition is regarded as an
+error and the read will be abandoned.
</para></listitem>
</varlistentry>
-<varlistentry id="MSG_IDNOTFND">
-<term>MSG_IDNOTFND could not replace message text for '%1': no such message</term>
+<varlistentry id="LOG_INPUT_OPEN_FAIL">
+<term>LOG_INPUT_OPEN_FAIL unable to open message file %1 for input: %2</term>
+<listitem><para>
+The program was not able to open the specified input message file for
+the reason given.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="LOG_INVALID_MESSAGE_ID">
+<term>LOG_INVALID_MESSAGE_ID line %1: invalid message identification '%2'</term>
+<listitem><para>
+An invalid message identification (ID) has been found during the read of
+a message file. Message IDs should comprise only alphanumeric characters
+and the underscore, and should not start with a digit.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="LOG_NAMESPACE_EXTRA_ARGS">
+<term>LOG_NAMESPACE_EXTRA_ARGS line %1: $NAMESPACE directive has too many arguments</term>
+<listitem><para>
+The $NAMESPACE directive in a message file takes a single argument, a
+namespace in which all the generated symbol names are placed. This error
+is generated when the compiler finds a $NAMESPACE directive with more
+than one argument.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="LOG_NAMESPACE_INVALID_ARG">
+<term>LOG_NAMESPACE_INVALID_ARG line %1: $NAMESPACE directive has an invalid argument ('%2')</term>
+<listitem><para>
+The $NAMESPACE argument in a message file should be a valid C++ namespace.
+This message is output if the simple check on the syntax of the string
+carried out by the reader fails.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="LOG_NAMESPACE_NO_ARGS">
+<term>LOG_NAMESPACE_NO_ARGS line %1: no arguments were given to the $NAMESPACE directive</term>
+<listitem><para>
+The $NAMESPACE directive in a message file takes a single argument,
+a C++ namespace in which all the generated symbol names are placed.
+This error is generated when the compiler finds a $NAMESPACE directive
+with no arguments.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="LOG_NO_MESSAGE_ID">
+<term>LOG_NO_MESSAGE_ID line %1: message definition line found without a message ID</term>
+<listitem><para>
+Within a message file, messages are defined by lines starting with a "%".
+The rest of the line should comprise the message ID and text describing
+the message. This error indicates the message compiler found a line in
+the message file comprising just the "%" and nothing else.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="LOG_NO_MESSAGE_TEXT">
+<term>LOG_NO_MESSAGE_TEXT line %1: line found containing a message ID ('%2') and no text</term>
+<listitem><para>
+Within a message file, messages are defined by lines starting with a "%".
+The rest of the line should comprise the message ID and text describing
+the message. This error indicates the message compiler found a line
+in the message file comprising just the "%" and message identification,
+but no text.
+</para></listitem>
+</varlistentry>
+
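Putting the directives and message-definition lines described above
together, a fragment of a message file looks roughly like the following
(illustrative only; the namespace shown is an assumed example and the
message is one listed later in this guide):

    $NAMESPACE isc::log
    % LOG_WRITE_ERROR error writing to %1: %2
    The specified error was encountered by the message compiler when
    writing to the named output file.
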
+<varlistentry id="LOG_NO_SUCH_MESSAGE">
+<term>LOG_NO_SUCH_MESSAGE could not replace message text for '%1': no such message</term>
<listitem><para>
During start-up a local message file was read. A line with the listed
-message identification was found in the file, but the identification is not
-one contained in the compiled-in message dictionary. Either the message
-identification has been mis-spelled in the file, or the local file was used
-for an earlier version of the software and the message with that
-identification has been removed.
+message identification was found in the file, but the identification is
+not one contained in the compiled-in message dictionary. This message
+may appear a number of times in the file, once for every such unknown
+message identification.
</para><para>
-This message may appear a number of times in the file, once for every such
-unknown message identification.
+There may be several reasons why this message appears:
+</para><para>
+- The message ID has been mis-spelled in the local message file.
+</para><para>
+- The program outputting the message may not use that particular message
+(e.g. it originates in a module not used by the program.)
+</para><para>
+- The local file was written for an earlier version of the BIND 10 software
+and the later version no longer generates that message.
+</para><para>
+Whatever the reason, there is no impact on the operation of BIND 10.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="LOG_OPEN_OUTPUT_FAIL">
+<term>LOG_OPEN_OUTPUT_FAIL unable to open %1 for output: %2</term>
+<listitem><para>
+Originating within the logging code, the program was not able to open
+the specified output file for the reason given.
</para></listitem>
</varlistentry>
-<varlistentry id="MSG_INVMSGID">
-<term>MSG_INVMSGID line %1: invalid message identification '%2'</term>
+<varlistentry id="LOG_PREFIX_EXTRA_ARGS">
+<term>LOG_PREFIX_EXTRA_ARGS line %1: $PREFIX directive has too many arguments</term>
<listitem><para>
-The concatenation of the prefix and the message identification is used as
-a symbol in the C++ module; as such it may only contain
+Within a message file, the $PREFIX directive takes a single argument,
+a prefix to be added to the symbol names when a C++ file is created.
+This error is generated when the compiler finds a $PREFIX directive with
+more than one argument.
+</para><para>
+Note: the $PREFIX directive is deprecated and will be removed in a future
+version of BIND 10.
</para></listitem>
</varlistentry>
-<varlistentry id="MSG_NOMSGID">
-<term>MSG_NOMSGID line %1: message definition line found without a message ID</term>
+<varlistentry id="LOG_PREFIX_INVALID_ARG">
+<term>LOG_PREFIX_INVALID_ARG line %1: $PREFIX directive has an invalid argument ('%2')</term>
<listitem><para>
-Message definition lines are lines starting with a "%". The rest of the line
-should comprise the message ID and text describing the message. This error
-indicates the message compiler found a line in the message file comprising
-just the "%" and nothing else.
+Within a message file, the $PREFIX directive takes a single argument,
+a prefix to be added to the symbol names when a C++ file is created.
+As such, it must adhere to restrictions on C++ symbol names (e.g. may
+only contain alphanumeric characters or underscores, and may not start
+with a digit). A $PREFIX directive was found with an argument (given
+in the message) that violates those restrictions.
+</para><para>
+Note: the $PREFIX directive is deprecated and will be removed in a future
+version of BIND 10.
</para></listitem>
</varlistentry>
-<varlistentry id="MSG_NOMSGTXT">
-<term>MSG_NOMSGTXT line %1: line found containing a message ID ('%2') and no text</term>
+<varlistentry id="LOG_READING_LOCAL_FILE">
+<term>LOG_READING_LOCAL_FILE reading local message file %1</term>
<listitem><para>
-Message definition lines are lines starting with a "%". The rest of the line
-should comprise the message ID and text describing the message. This error
-is generated when a line is found in the message file that contains the
-leading "%" and the message identification but no text.
+This is an informational message output by BIND 10 when it starts to read
+a local message file. (A local message file may replace the text of
+one or more messages; the ID of the message will not be changed though.)
</para></listitem>
</varlistentry>
-<varlistentry id="MSG_NSEXTRARG">
-<term>MSG_NSEXTRARG line %1: $NAMESPACE directive has too many arguments</term>
+<varlistentry id="LOG_READ_ERROR">
+<term>LOG_READ_ERROR error reading from message file %1: %2</term>
<listitem><para>
-The $NAMESPACE directive takes a single argument, a namespace in which all the
-generated symbol names are placed. This error is generated when the
-compiler finds a $NAMESPACE directive with more than one argument.
+The specified error was encountered reading from the named message file.
</para></listitem>
</varlistentry>
-<varlistentry id="MSG_NSINVARG">
-<term>MSG_NSINVARG line %1: $NAMESPACE directive has an invalid argument ('%2')</term>
+<varlistentry id="LOG_UNRECOGNISED_DIRECTIVE">
+<term>LOG_UNRECOGNISED_DIRECTIVE line %1: unrecognised directive '%2'</term>
<listitem><para>
-The $NAMESPACE argument should be a valid C++ namespace. The reader does a
-cursory check on its validity, checking that the characters in the namespace
-are correct. The error is generated when the reader finds an invalid
-character. (Valid are alphanumeric characters, underscores and colons.)
+Within a message file, a line starting with a dollar symbol was found
+(indicating the presence of a directive) but the first word on the line
+(shown in the message) was not recognised.
</para></listitem>
</varlistentry>
-<varlistentry id="MSG_NSNOARG">
-<term>MSG_NSNOARG line %1: no arguments were given to the $NAMESPACE directive</term>
+<varlistentry id="LOG_WRITE_ERROR">
+<term>LOG_WRITE_ERROR error writing to %1: %2</term>
<listitem><para>
-The $NAMESPACE directive takes a single argument, a namespace in which all the
-generated symbol names are placed. This error is generated when the
-compiler finds a $NAMESPACE directive with no arguments.
+The specified error was encountered by the message compiler when writing
+to the named output file.
</para></listitem>
</varlistentry>
-<varlistentry id="MSG_OPENIN">
-<term>MSG_OPENIN unable to open message file %1 for input: %2</term>
+<varlistentry id="NOTIFY_OUT_INVALID_ADDRESS">
+<term>NOTIFY_OUT_INVALID_ADDRESS invalid address %1#%2: %3</term>
<listitem><para>
-The program was not able to open the specified input message file for the
-reason given.
+The notify_out library tried to send a notify message to the given
+address, but it appears to be an invalid address. The configuration
+for secondary nameservers might contain a typographic error, or a
+different BIND 10 module has forgotten to validate its data before
+sending this module a notify command. As such, this should normally
+not happen, and points to an oversight in a different module.
</para></listitem>
</varlistentry>
-<varlistentry id="MSG_OPENOUT">
-<term>MSG_OPENOUT unable to open %1 for output: %2</term>
+<varlistentry id="NOTIFY_OUT_REPLY_BAD_OPCODE">
+<term>NOTIFY_OUT_REPLY_BAD_OPCODE bad opcode in notify reply from %1#%2: %3</term>
<listitem><para>
-The program was not able to open the specified output file for the reason
-given.
+The notify_out library sent a notify message to the nameserver at
+the given address, but the response did not have the opcode set to
+NOTIFY. The opcode in the response is printed. Since there was a
+response, no more notifies will be sent to this server for this
+notification event.
</para></listitem>
</varlistentry>
-<varlistentry id="MSG_PRFEXTRARG">
-<term>MSG_PRFEXTRARG line %1: $PREFIX directive has too many arguments</term>
+<varlistentry id="NOTIFY_OUT_REPLY_BAD_QID">
+<term>NOTIFY_OUT_REPLY_BAD_QID bad QID in notify reply from %1#%2: got %3, should be %4</term>
<listitem><para>
-The $PREFIX directive takes a single argument, a prefix to be added to the
-symbol names when a C++ .h file is created. This error is generated when the
-compiler finds a $PREFIX directive with more than one argument.
+The notify_out library sent a notify message to the nameserver at
+the given address, but the query id in the response does not match
+the one we sent. Since there was a response, no more notifies will
+be sent to this server for this notification event.
</para></listitem>
</varlistentry>
-<varlistentry id="MSG_PRFINVARG">
-<term>MSG_PRFINVARG line %1: $PREFIX directive has an invalid argument ('%2')</term>
+<varlistentry id="NOTIFY_OUT_REPLY_BAD_QUERY_NAME">
+<term>NOTIFY_OUT_REPLY_BAD_QUERY_NAME bad query name in notify reply from %1#%2: got %3, should be %4</term>
<listitem><para>
-The $PREFIX argument is used in a symbol name in a C++ header file. As such,
-it must adhere to restrictions on C++ symbol names (e.g. may only contain
-alphanumeric characters or underscores, and may nor start with a digit).
-A $PREFIX directive was found with an argument (given in the message) that
-violates those restictions.
+The notify_out library sent a notify message to the nameserver at
+the given address, but the query name in the response does not match
+the one we sent. Since there was a response, no more notifies will
+be sent to this server for this notification event.
</para></listitem>
</varlistentry>
-<varlistentry id="MSG_RDLOCMES">
-<term>MSG_RDLOCMES reading local message file %1</term>
+<varlistentry id="NOTIFY_OUT_REPLY_QR_NOT_SET">
+<term>NOTIFY_OUT_REPLY_QR_NOT_SET QR flags set to 0 in reply to notify from %1#%2</term>
<listitem><para>
-This is an informational message output by BIND10 when it starts to read a
-local message file. (A local message file may replace the text of one of more
-messages; the ID of the message will not be changed though.)
+The notify_out library sent a notify message to the nameserver at the
+given address, but the reply did not have the QR bit set to one.
+Since there was a response, no more notifies will be sent to this
+server for this notification event.
</para></listitem>
</varlistentry>
-<varlistentry id="MSG_READERR">
-<term>MSG_READERR error reading from message file %1: %2</term>
+<varlistentry id="NOTIFY_OUT_REPLY_UNCAUGHT_EXCEPTION">
+<term>NOTIFY_OUT_REPLY_UNCAUGHT_EXCEPTION uncaught exception: %1</term>
<listitem><para>
-The specified error was encountered reading from the named message file.
+There was an uncaught exception in the handling of a notify reply
+message, either in the message parser, or while trying to extract data
+from the parsed message. The error is printed, and notify_out will
+treat the response as a bad message, but this does point to a
+programming error, since all exceptions should have been caught
+explicitly. Please file a bug report. Since there was a response,
+no more notifies will be sent to this server for this notification
+event.
+</para></listitem>
+</varlistentry>
+
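The reply-validation messages above (BAD_OPCODE, BAD_QID, BAD_QUERY_NAME
and QR_NOT_SET) correspond to a set of sanity checks on a notify response.
A compact sketch of those checks is shown below; the actual notify_out
library is Python and works on parsed DNS messages, so the C++ struct and
names here are purely illustrative:

    #include <string>

    // Minimal stand-in for the fields of a parsed notify response.
    struct NotifyReply {
        bool qr_set;            // QR bit of the reply
        std::string opcode;     // e.g. "NOTIFY"
        unsigned int qid;       // query ID echoed by the server
        std::string qname;      // question name echoed by the server
    };

    // Each failing branch corresponds to one NOTIFY_OUT_REPLY_* message;
    // in every case a response was received, so no further notifies are
    // sent for this notification event.
    bool replyLooksValid(const NotifyReply& r, unsigned int sent_qid,
                         const std::string& sent_qname) {
        if (!r.qr_set)             return false;  // ..._QR_NOT_SET
        if (r.opcode != "NOTIFY")  return false;  // ..._BAD_OPCODE
        if (r.qid != sent_qid)     return false;  // ..._BAD_QID
        if (r.qname != sent_qname) return false;  // ..._BAD_QUERY_NAME
        return true;
    }

    int main() {
        NotifyReply r{true, "NOTIFY", 0x1234, "example.com."};
        return replyLooksValid(r, 0x1234, "example.com.") ? 0 : 1;
    }
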
+<varlistentry id="NOTIFY_OUT_RETRY_EXCEEDED">
+<term>NOTIFY_OUT_RETRY_EXCEEDED notify to %1#%2: number of retries (%3) exceeded</term>
+<listitem><para>
+The maximum number of retries for the notify target has been exceeded.
+Either the address of the secondary nameserver is wrong, or it is not
+responding.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="NOTIFY_OUT_SENDING_NOTIFY">
+<term>NOTIFY_OUT_SENDING_NOTIFY sending notify to %1#%2</term>
+<listitem><para>
+A notify message is sent to the secondary nameserver at the given
+address.
</para></listitem>
</varlistentry>
-<varlistentry id="MSG_UNRECDIR">
-<term>MSG_UNRECDIR line %1: unrecognised directive '%2'</term>
+<varlistentry id="NOTIFY_OUT_SOCKET_ERROR">
+<term>NOTIFY_OUT_SOCKET_ERROR socket error sending notify to %1#%2: %3</term>
<listitem><para>
-A line starting with a dollar symbol was found, but the first word on the line
-(shown in the message) was not a recognised message compiler directive.
+There was a network error while trying to send a notify message to
+the given address. The address might be unreachable. The socket
+error is printed and should provide more information.
</para></listitem>
</varlistentry>
-<varlistentry id="MSG_WRITERR">
-<term>MSG_WRITERR error writing to %1: %2</term>
+<varlistentry id="NOTIFY_OUT_SOCKET_RECV_ERROR">
+<term>NOTIFY_OUT_SOCKET_RECV_ERROR socket error reading notify reply from %1#%2: %3</term>
<listitem><para>
-The specified error was encountered by the message compiler when writing to
-the named output file.
+There was a network error while trying to read a notify reply
+message from the given address. The socket error is printed and should
+provide more information.
</para></listitem>
</varlistentry>
-<varlistentry id="NSAS_INVRESPSTR">
-<term>NSAS_INVRESPSTR queried for %1 but got invalid response</term>
+<varlistentry id="NOTIFY_OUT_TIMEOUT">
+<term>NOTIFY_OUT_TIMEOUT retry notify to %1#%2</term>
<listitem><para>
-This message indicates an internal error in the nameserver address store
-component (NSAS) of the resolver. The NSAS made a query for a RR for the
-specified nameserver but received an invalid response. Either the success
-function was called without a DNS message or the message was invalid on some
-way. (In the latter case, the error should have been picked up elsewhere in
-the processing logic, hence the raising of the error here.)
+The notify message to the given address (noted as address#port) has
+timed out, and the message will be resent until the max retry limit
+is reached.
</para></listitem>
</varlistentry>
-<varlistentry id="NSAS_INVRESPTC">
-<term>NSAS_INVRESPTC queried for %1 RR of type/class %2/%3, received response %4/%5</term>
+<varlistentry id="NSAS_FIND_NS_ADDRESS">
+<term>NSAS_FIND_NS_ADDRESS asking resolver to obtain A and AAAA records for %1</term>
<listitem><para>
-This message indicates an internal error in the nameserver address store
-component (NSAS) of the resolver. The NSAS made a query for the given RR
-type and class, but instead received an answer with the given type and class.
+A debug message issued when the NSAS (nameserver address store - part
+of the resolver) is making a callback into the resolver to retrieve the
+address records for the specified nameserver.
</para></listitem>
</varlistentry>
-<varlistentry id="NSAS_LOOKUPCANCEL">
-<term>NSAS_LOOKUPCANCEL lookup for zone %1 has been cancelled</term>
+<varlistentry id="NSAS_FOUND_ADDRESS">
+<term>NSAS_FOUND_ADDRESS found address %1 for %2</term>
<listitem><para>
-A debug message, this is output when a NSAS (nameserver address store -
-part of the resolver) lookup for a zone has been cancelled.
+A debug message issued when the NSAS (nameserver address store - part
+of the resolver) has retrieved the given address for the specified
+nameserver through an external query.
</para></listitem>
</varlistentry>
-<varlistentry id="NSAS_LOOKUPZONE">
-<term>NSAS_LOOKUPZONE searching NSAS for nameservers for zone %1</term>
+<varlistentry id="NSAS_INVALID_RESPONSE">
+<term>NSAS_INVALID_RESPONSE queried for %1 but got invalid response</term>
<listitem><para>
-A debug message, this is output when a call is made to the nameserver address
-store (part of the resolver) to obtain the nameservers for the specified zone.
+The NSAS (nameserver address store - part of the resolver) made a query
+for a RR for the specified nameserver but received an invalid response.
+Either the success function was called without a DNS message or the
+message was invalid in some way. (In the latter case, the error should
+have been picked up elsewhere in the processing logic, hence the raising
+of the error here.)
+</para><para>
+This message indicates an internal error in the NSAS. Please raise a
+bug report.
</para></listitem>
</varlistentry>
-<varlistentry id="NSAS_NSADDR">
-<term>NSAS_NSADDR asking resolver to obtain A and AAAA records for %1</term>
+<varlistentry id="NSAS_LOOKUP_CANCEL">
+<term>NSAS_LOOKUP_CANCEL lookup for zone %1 has been canceled</term>
<listitem><para>
-A debug message, the NSAS (nameserver address store - part of the resolver) is
-making a callback into the resolver to retrieve the address records for the
-specified nameserver.
+A debug message issued when an NSAS (nameserver address store - part of
+the resolver) lookup for a zone has been canceled.
</para></listitem>
</varlistentry>
-<varlistentry id="NSAS_NSLKUPFAIL">
-<term>NSAS_NSLKUPFAIL failed to lookup any %1 for %2</term>
+<varlistentry id="NSAS_NS_LOOKUP_FAIL">
+<term>NSAS_NS_LOOKUP_FAIL failed to lookup any %1 for %2</term>
<listitem><para>
-A debug message, the NSAS (nameserver address store - part of the resolver)
-has been unable to retrieve the specified resource record for the specified
-nameserver. This is not necessarily a problem - the nameserver may be
-unreachable, in which case the NSAS will try other nameservers in the zone.
+A debug message issued when the NSAS (nameserver address store - part of
+the resolver) has been unable to retrieve the specified resource record
+for the specified nameserver. This is not necessarily a problem - the
+nameserver may be unreachable, in which case the NSAS will try other
+nameservers in the zone.
</para></listitem>
</varlistentry>
-<varlistentry id="NSAS_NSLKUPSUCC">
-<term>NSAS_NSLKUPSUCC found address %1 for %2</term>
+<varlistentry id="NSAS_SEARCH_ZONE_NS">
+<term>NSAS_SEARCH_ZONE_NS searching NSAS for nameservers for zone %1</term>
<listitem><para>
-A debug message, the NSAS (nameserver address store - part of the resolver)
-has retrieved the given address for the specified nameserver through an
-external query.
+A debug message output when a call is made to the NSAS (nameserver
+address store - part of the resolver) to obtain the nameservers for
+the specified zone.
</para></listitem>
</varlistentry>
-<varlistentry id="NSAS_SETRTT">
-<term>NSAS_SETRTT reporting RTT for %1 as %2; new value is now %3</term>
+<varlistentry id="NSAS_UPDATE_RTT">
+<term>NSAS_UPDATE_RTT update RTT for %1: was %2 ms, is now %3 ms</term>
<listitem><para>
A NSAS (nameserver address store - part of the resolver) debug message
-reporting the round-trip time (RTT) for a query made to the specified
-nameserver. The RTT has been updated using the value given and the new RTT is
-displayed. (The RTT is subject to a calculation that damps out sudden
-changes. As a result, the new RTT is not necessarily equal to the RTT
-reported.)
+reporting the update of a round-trip time (RTT) for a query made to the
+specified nameserver. The RTT has been updated using the value given
+and the new RTT is displayed. (The RTT is subject to a calculation that
+damps out sudden changes. As a result, the new RTT used by the NSAS in
+future decisions of which nameserver to use is not necessarily equal to
+the RTT reported.)
+</para></listitem>
+</varlistentry>
+
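The damping calculation itself is not specified here. One common approach,
shown purely as an assumption rather than as the NSAS's actual algorithm,
is an exponentially weighted moving average:

    #include <iostream>

    // Blend a new round-trip time measurement into the stored RTT so a
    // single unusually slow or fast response does not swing the value too
    // far. The weight of 0.3 is an illustrative choice, not a BIND 10
    // parameter.
    double updateRtt(double old_rtt_ms, double measured_rtt_ms,
                     double new_sample_weight = 0.3) {
        return (1.0 - new_sample_weight) * old_rtt_ms +
               new_sample_weight * measured_rtt_ms;
    }

    int main() {
        // was 80 ms, measured 200 ms, is now 116 ms - the jump is damped.
        std::cout << updateRtt(80.0, 200.0) << " ms" << std::endl;
    }
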
+<varlistentry id="NSAS_WRONG_ANSWER">
+<term>NSAS_WRONG_ANSWER queried for %1 RR of type/class %2/%3, received response %4/%5</term>
+<listitem><para>
+The NSAS (nameserver address store - part of the resolver) made a query for
+a resource record of a particular type and class, but instead received
+an answer with a different type and class (both are given in the message).
+</para><para>
+This message indicates an internal error in the NSAS. Please raise a
+bug report.
</para></listitem>
</varlistentry>
@@ -1460,16 +3209,16 @@ type> tuple in the cache; instead, the deepest delegation found is indicated.
</para></listitem>
</varlistentry>
-<varlistentry id="RESLIB_FOLLOWCNAME">
-<term>RESLIB_FOLLOWCNAME following CNAME chain to <%1></term>
+<varlistentry id="RESLIB_FOLLOW_CNAME">
+<term>RESLIB_FOLLOW_CNAME following CNAME chain to <%1></term>
<listitem><para>
A debug message, a CNAME response was received and another query is being issued
for the <name, class, type> tuple.
</para></listitem>
</varlistentry>
-<varlistentry id="RESLIB_LONGCHAIN">
-<term>RESLIB_LONGCHAIN CNAME received in response to query for <%1>: CNAME chain length exceeded</term>
+<varlistentry id="RESLIB_LONG_CHAIN">
+<term>RESLIB_LONG_CHAIN CNAME received in response to query for <%1>: CNAME chain length exceeded</term>
<listitem><para>
A debug message recording that a CNAME response has been received to an upstream
query for the specified question (Previous debug messages will have indicated
@@ -1479,26 +3228,26 @@ is where on CNAME points to another) and so an error is being returned.
</para></listitem>
</varlistentry>
-<varlistentry id="RESLIB_NONSRRSET">
-<term>RESLIB_NONSRRSET no NS RRSet in referral response received to query for <%1></term>
+<varlistentry id="RESLIB_NO_NS_RRSET">
+<term>RESLIB_NO_NS_RRSET no NS RRSet in referral response received to query for <%1></term>
<listitem><para>
A debug message, this indicates that a response was received for the specified
-query and was categorised as a referral. However, the received message did
+query and was categorized as a referral. However, the received message did
not contain any NS RRsets. This may indicate a programming error in the
response classification code.
</para></listitem>
</varlistentry>
-<varlistentry id="RESLIB_NSASLOOK">
-<term>RESLIB_NSASLOOK looking up nameserver for zone %1 in the NSAS</term>
+<varlistentry id="RESLIB_NSAS_LOOKUP">
+<term>RESLIB_NSAS_LOOKUP looking up nameserver for zone %1 in the NSAS</term>
<listitem><para>
A debug message, the RunningQuery object is querying the NSAS for the
nameservers for the specified zone.
</para></listitem>
</varlistentry>
-<varlistentry id="RESLIB_NXDOMRR">
-<term>RESLIB_NXDOMRR NXDOMAIN/NXRRSET received in response to query for <%1></term>
+<varlistentry id="RESLIB_NXDOM_NXRR">
+<term>RESLIB_NXDOM_NXRR NXDOMAIN/NXRRSET received in response to query for <%1></term>
<listitem><para>
A debug message recording that either a NXDOMAIN or an NXRRSET response has
been received to an upstream query for the specified question. Previous debug
@@ -1514,8 +3263,8 @@ are no retries left, an error will be reported.
</para></listitem>
</varlistentry>
-<varlistentry id="RESLIB_PROTOCOLRTRY">
-<term>RESLIB_PROTOCOLRTRY protocol error in answer for %1: %2 (retries left: %3)</term>
+<varlistentry id="RESLIB_PROTOCOL_RETRY">
+<term>RESLIB_PROTOCOL_RETRY protocol error in answer for %1: %2 (retries left: %3)</term>
<listitem><para>
A debug message indicating that a protocol error was received and that
the resolver is repeating the query to the same nameserver. After this
@@ -1523,33 +3272,16 @@ repeated query, there will be the indicated number of retries left.
</para></listitem>
</varlistentry>
-<varlistentry id="RESLIB_RCODERR">
-<term>RESLIB_RCODERR RCODE indicates error in response to query for <%1></term>
+<varlistentry id="RESLIB_RCODE_ERR">
+<term>RESLIB_RCODE_ERR RCODE indicates error in response to query for <%1></term>
<listitem><para>
A debug message, the response to the specified query indicated an error
that is not covered by a specific code path. A SERVFAIL will be returned.
</para></listitem>
</varlistentry>
-<varlistentry id="RESLIB_REFERRAL">
-<term>RESLIB_REFERRAL referral received in response to query for <%1></term>
-<listitem><para>
-A debug message recording that a referral response has been received to an
-upstream query for the specified question. Previous debug messages will
-have indicated the server to which the question was sent.
-</para></listitem>
-</varlistentry>
-
-<varlistentry id="RESLIB_REFERZONE">
-<term>RESLIB_REFERZONE referred to zone %1</term>
-<listitem><para>
-A debug message indicating that the last referral message was to the specified
-zone.
-</para></listitem>
-</varlistentry>
-
-<varlistentry id="RESLIB_RESCAFND">
-<term>RESLIB_RESCAFND found <%1> in the cache (resolve() instance %2)</term>
+<varlistentry id="RESLIB_RECQ_CACHE_FIND">
+<term>RESLIB_RECQ_CACHE_FIND found <%1> in the cache (resolve() instance %2)</term>
<listitem><para>
This is a debug message and indicates that a RecursiveQuery object found
the specified <name, class, type> tuple in the cache. The instance number
@@ -1558,8 +3290,8 @@ been called.
</para></listitem>
</varlistentry>
-<varlistentry id="RESLIB_RESCANOTFND">
-<term>RESLIB_RESCANOTFND did not find <%1> in the cache, starting RunningQuery (resolve() instance %2)</term>
+<varlistentry id="RESLIB_RECQ_CACHE_NO_FIND">
+<term>RESLIB_RECQ_CACHE_NO_FIND did not find <%1> in the cache, starting RunningQuery (resolve() instance %2)</term>
<listitem><para>
This is a debug message and indicates that the look in the cache made by the
RecursiveQuery::resolve() method did not find an answer, so a new RunningQuery
@@ -1569,6 +3301,23 @@ been called.
</para></listitem>
</varlistentry>
+<varlistentry id="RESLIB_REFERRAL">
+<term>RESLIB_REFERRAL referral received in response to query for <%1></term>
+<listitem><para>
+A debug message recording that a referral response has been received to an
+upstream query for the specified question. Previous debug messages will
+have indicated the server to which the question was sent.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESLIB_REFER_ZONE">
+<term>RESLIB_REFER_ZONE referred to zone %1</term>
+<listitem><para>
+A debug message indicating that the last referral message was to the specified
+zone.
+</para></listitem>
+</varlistentry>
+
<varlistentry id="RESLIB_RESOLVE">
<term>RESLIB_RESOLVE asked to resolve <%1> (resolve() instance %2)</term>
<listitem><para>
@@ -1579,8 +3328,8 @@ message indicates which of the two resolve() methods has been called.
</para></listitem>
</varlistentry>
-<varlistentry id="RESLIB_RRSETFND">
-<term>RESLIB_RRSETFND found single RRset in the cache when querying for <%1> (resolve() instance %2)</term>
+<varlistentry id="RESLIB_RRSET_FOUND">
+<term>RESLIB_RRSET_FOUND found single RRset in the cache when querying for <%1> (resolve() instance %2)</term>
<listitem><para>
A debug message, indicating that when RecursiveQuery::resolve queried the
cache, a single RRset was found which was put in the answer. The instance
@@ -1596,16 +3345,16 @@ A debug message giving the round-trip time of the last query and response.
</para></listitem>
</varlistentry>
-<varlistentry id="RESLIB_RUNCAFND">
-<term>RESLIB_RUNCAFND found <%1> in the cache</term>
+<varlistentry id="RESLIB_RUNQ_CACHE_FIND">
+<term>RESLIB_RUNQ_CACHE_FIND found <%1> in the cache</term>
<listitem><para>
This is a debug message and indicates that a RunningQuery object found
the specified <name, class, type> tuple in the cache.
</para></listitem>
</varlistentry>
-<varlistentry id="RESLIB_RUNCALOOK">
-<term>RESLIB_RUNCALOOK looking up up <%1> in the cache</term>
+<varlistentry id="RESLIB_RUNQ_CACHE_LOOKUP">
+<term>RESLIB_RUNQ_CACHE_LOOKUP looking up up <%1> in the cache</term>
<listitem><para>
This is a debug message and indicates that a RunningQuery object has made
a call to its doLookup() method to look up the specified <name, class, type>
@@ -1613,16 +3362,16 @@ tuple, the first action of which will be to examine the cache.
</para></listitem>
</varlistentry>
-<varlistentry id="RESLIB_RUNQUFAIL">
-<term>RESLIB_RUNQUFAIL failure callback - nameservers are unreachable</term>
+<varlistentry id="RESLIB_RUNQ_FAIL">
+<term>RESLIB_RUNQ_FAIL failure callback - nameservers are unreachable</term>
<listitem><para>
A debug message indicating that a RunningQuery's failure callback has been
called because all nameservers for the zone in question are unreachable.
</para></listitem>
</varlistentry>
-<varlistentry id="RESLIB_RUNQUSUCC">
-<term>RESLIB_RUNQUSUCC success callback - sending query to %1</term>
+<varlistentry id="RESLIB_RUNQ_SUCCESS">
+<term>RESLIB_RUNQ_SUCCESS success callback - sending query to %1</term>
<listitem><para>
A debug message indicating that a RunningQuery's success callback has been
called because a nameserver has been found, and that a query is being sent
@@ -1630,19 +3379,19 @@ to the specified nameserver.
</para></listitem>
</varlistentry>
-<varlistentry id="RESLIB_TESTSERV">
-<term>RESLIB_TESTSERV setting test server to %1(%2)</term>
+<varlistentry id="RESLIB_TEST_SERVER">
+<term>RESLIB_TEST_SERVER setting test server to %1(%2)</term>
<listitem><para>
-This is an internal debugging message and is only generated in unit tests.
-It indicates that all upstream queries from the resolver are being routed to
-the specified server, regardless of the address of the nameserver to which
-the query would normally be routed. As it should never be seen in normal
-operation, it is a warning message instead of a debug message.
+This is a warning message only generated in unit tests. It indicates
+that all upstream queries from the resolver are being routed to the
+specified server, regardless of the address of the nameserver to which
+the query would normally be routed. If seen during normal operation,
+please submit a bug report.
</para></listitem>
</varlistentry>
-<varlistentry id="RESLIB_TESTUPSTR">
-<term>RESLIB_TESTUPSTR sending upstream query for <%1> to test server at %2</term>
+<varlistentry id="RESLIB_TEST_UPSTREAM">
+<term>RESLIB_TEST_UPSTREAM sending upstream query for <%1> to test server at %2</term>
<listitem><para>
This is a debug message and should only be seen in unit tests. A query for
the specified <name, class, type> tuple is being sent to a test nameserver
@@ -1653,13 +3402,13 @@ whose address is given in the message.
<varlistentry id="RESLIB_TIMEOUT">
<term>RESLIB_TIMEOUT query <%1> to %2 timed out</term>
<listitem><para>
-A debug message indicating that the specified query has timed out and as
-there are no retries left, an error will be reported.
+A debug message indicating that the specified upstream query has timed out and
+there are no retries left.
</para></listitem>
</varlistentry>
-<varlistentry id="RESLIB_TIMEOUTRTRY">
-<term>RESLIB_TIMEOUTRTRY query <%1> to %2 timed out, re-trying (retries left: %3)</term>
+<varlistentry id="RESLIB_TIMEOUT_RETRY">
+<term>RESLIB_TIMEOUT_RETRY query <%1> to %2 timed out, re-trying (retries left: %3)</term>
<listitem><para>
A debug message indicating that the specified query has timed out and that
the resolver is repeating the query to the same nameserver. After this
@@ -1685,308 +3434,374 @@ tuple is being sent to a nameserver whose address is given in the message.
</para></listitem>
</varlistentry>
-<varlistentry id="RESOLVER_AXFRTCP">
-<term>RESOLVER_AXFRTCP AXFR request received over TCP</term>
+<varlistentry id="RESOLVER_AXFR_TCP">
+<term>RESOLVER_AXFR_TCP AXFR request received over TCP</term>
<listitem><para>
-A debug message, the resolver received a NOTIFY message over TCP. The server
-cannot process it and will return an error message to the sender with the
-RCODE set to NOTIMP.
+This is a debug message output when the resolver received a request for
+an AXFR (full transfer of a zone) over TCP. Only authoritative servers
+are able to handle AXFR requests, so the resolver will return an error
+message to the sender with the RCODE set to NOTIMP.
</para></listitem>
</varlistentry>
-<varlistentry id="RESOLVER_AXFRUDP">
-<term>RESOLVER_AXFRUDP AXFR request received over UDP</term>
+<varlistentry id="RESOLVER_AXFR_UDP">
+<term>RESOLVER_AXFR_UDP AXFR request received over UDP</term>
<listitem><para>
-A debug message, the resolver received a NOTIFY message over UDP. The server
-cannot process it (and in any case, an AXFR request should be sent over TCP)
-and will return an error message to the sender with the RCODE set to FORMERR.
+This is a debug message output when the resolver received a request for
+an AXFR (full transfer of a zone) over UDP. Only authoritative servers
+are able to handle AXFR requests (and in any case, an AXFR request should
+be sent over TCP), so the resolver will return an error message to the
+sender with the RCODE set to NOTIMP.
</para></listitem>
</varlistentry>
-<varlistentry id="RESOLVER_CLTMOSMALL">
-<term>RESOLVER_CLTMOSMALL client timeout of %1 is too small</term>
+<varlistentry id="RESOLVER_CLIENT_TIME_SMALL">
+<term>RESOLVER_CLIENT_TIME_SMALL client timeout of %1 is too small</term>
<listitem><para>
-An error indicating that the configuration value specified for the query
-timeout is too small.
+During the update of the resolver's configuration parameters, the value
+of the client timeout was found to be too small. The configuration
+update was abandoned and the parameters were not changed.
</para></listitem>
</varlistentry>
-<varlistentry id="RESOLVER_CONFIGCHAN">
-<term>RESOLVER_CONFIGCHAN configuration channel created</term>
+<varlistentry id="RESOLVER_CONFIG_CHANNEL">
+<term>RESOLVER_CONFIG_CHANNEL configuration channel created</term>
<listitem><para>
-A debug message, output when the resolver has successfully established a
-connection to the configuration channel.
+This is a debug message output when the resolver has successfully
+established a connection to the configuration channel.
</para></listitem>
</varlistentry>
-<varlistentry id="RESOLVER_CONFIGERR">
-<term>RESOLVER_CONFIGERR error in configuration: %1</term>
+<varlistentry id="RESOLVER_CONFIG_ERROR">
+<term>RESOLVER_CONFIG_ERROR error in configuration: %1</term>
<listitem><para>
-An error was detected in a configuration update received by the resolver. This
-may be in the format of the configuration message (in which case this is a
-programming error) or it may be in the data supplied (in which case it is
-a user error). The reason for the error, given as a parameter in the message,
-will give more details.
+An error was detected in a configuration update received by the
+resolver. This may be in the format of the configuration message (in
+which case this is a programming error) or it may be in the data supplied
+(in which case it is a user error). The reason for the error, included
+in the message, will give more details. The configuration update is
+not applied and the resolver parameters were not changed.
</para></listitem>
</varlistentry>
-<varlistentry id="RESOLVER_CONFIGLOAD">
-<term>RESOLVER_CONFIGLOAD configuration loaded</term>
+<varlistentry id="RESOLVER_CONFIG_LOADED">
+<term>RESOLVER_CONFIG_LOADED configuration loaded</term>
<listitem><para>
-A debug message, output when the resolver configuration has been successfully
-loaded.
+This is a debug message output when the resolver configuration has been
+successfully loaded.
</para></listitem>
</varlistentry>
-<varlistentry id="RESOLVER_CONFIGUPD">
-<term>RESOLVER_CONFIGUPD configuration updated: %1</term>
+<varlistentry id="RESOLVER_CONFIG_UPDATED">
+<term>RESOLVER_CONFIG_UPDATED configuration updated: %1</term>
<listitem><para>
-A debug message, the configuration has been updated with the specified
-information.
+This is a debug message output when the resolver configuration is being
+updated with the specified information.
</para></listitem>
</varlistentry>
<varlistentry id="RESOLVER_CREATED">
<term>RESOLVER_CREATED main resolver object created</term>
<listitem><para>
-A debug message, output when the Resolver() object has been created.
+This is a debug message indicating that the main resolver object has
+been created.
</para></listitem>
</varlistentry>
-<varlistentry id="RESOLVER_DNSMSGRCVD">
-<term>RESOLVER_DNSMSGRCVD DNS message received: %1</term>
+<varlistentry id="RESOLVER_DNS_MESSAGE_RECEIVED">
+<term>RESOLVER_DNS_MESSAGE_RECEIVED DNS message received: %1</term>
<listitem><para>
-A debug message, this always precedes some other logging message and is the
-formatted contents of the DNS packet that the other message refers to.
+This is a debug message from the resolver listing the contents of a
+received DNS message.
</para></listitem>
</varlistentry>
-<varlistentry id="RESOLVER_DNSMSGSENT">
-<term>RESOLVER_DNSMSGSENT DNS message of %1 bytes sent: %2</term>
+<varlistentry id="RESOLVER_DNS_MESSAGE_SENT">
+<term>RESOLVER_DNS_MESSAGE_SENT DNS message of %1 bytes sent: %2</term>
<listitem><para>
-A debug message, this contains details of the response sent back to the querying
-system.
+This is a debug message containing details of the response returned by
+the resolver to the querying system.
</para></listitem>
</varlistentry>
<varlistentry id="RESOLVER_FAILED">
<term>RESOLVER_FAILED resolver failed, reason: %1</term>
<listitem><para>
-This is an error message output when an unhandled exception is caught by the
-resolver. All it can do is to shut down.
+This is an error message output when an unhandled exception is caught
+by the resolver. After this, the resolver will shut itself down.
+Please submit a bug report.
</para></listitem>
</varlistentry>
-<varlistentry id="RESOLVER_FWDADDR">
-<term>RESOLVER_FWDADDR setting forward address %1(%2)</term>
+<varlistentry id="RESOLVER_FORWARD_ADDRESS">
+<term>RESOLVER_FORWARD_ADDRESS setting forward address %1(%2)</term>
<listitem><para>
-This message may appear multiple times during startup, and it lists the
-forward addresses used by the resolver when running in forwarding mode.
+If the resolver is running in forward mode, this message will appear
+during startup to list the forward address. If multiple addresses are
+specified, it will appear once for each address.
</para></listitem>
</varlistentry>
-<varlistentry id="RESOLVER_FWDQUERY">
-<term>RESOLVER_FWDQUERY processing forward query</term>
+<varlistentry id="RESOLVER_FORWARD_QUERY">
+<term>RESOLVER_FORWARD_QUERY processing forward query</term>
<listitem><para>
-The received query has passed all checks and is being forwarded to upstream
+This is a debug message indicating that a query received by the resolver
+has passed a set of checks (message is well-formed, it is allowed by the
+ACL, it is a supported opcode, etc.) and is being forwarded to upstream
servers.
</para></listitem>
</varlistentry>
-<varlistentry id="RESOLVER_HDRERR">
-<term>RESOLVER_HDRERR message received, exception when processing header: %1</term>
+<varlistentry id="RESOLVER_HEADER_ERROR">
+<term>RESOLVER_HEADER_ERROR message received, exception when processing header: %1</term>
<listitem><para>
-A debug message noting that an exception occurred during the processing of
-a received packet. The packet has been dropped.
+This is a debug message from the resolver noting that an exception
+occurred during the processing of a received packet. The packet has
+been dropped.
</para></listitem>
</varlistentry>
<varlistentry id="RESOLVER_IXFR">
<term>RESOLVER_IXFR IXFR request received</term>
<listitem><para>
-The resolver received a NOTIFY message over TCP. The server cannot process it
-and will return an error message to the sender with the RCODE set to NOTIMP.
+This is a debug message indicating that the resolver received a request
+for an IXFR (incremental transfer of a zone). Only authoritative servers
+are able to handle IXFR requests, so the resolver will return an error
+message to the sender with the RCODE set to NOTIMP.
</para></listitem>
</varlistentry>
-<varlistentry id="RESOLVER_LKTMOSMALL">
-<term>RESOLVER_LKTMOSMALL lookup timeout of %1 is too small</term>
+<varlistentry id="RESOLVER_LOOKUP_TIME_SMALL">
+<term>RESOLVER_LOOKUP_TIME_SMALL lookup timeout of %1 is too small</term>
<listitem><para>
-An error indicating that the configuration value specified for the lookup
-timeout is too small.
+During the update of the resolver's configuration parameters, the value
+of the lookup timeout was found to be too small. The configuration
+update will not be applied.
</para></listitem>
</varlistentry>
-<varlistentry id="RESOLVER_NFYNOTAUTH">
-<term>RESOLVER_NFYNOTAUTH NOTIFY arrived but server is not authoritative</term>
+<varlistentry id="RESOLVER_MESSAGE_ERROR">
+<term>RESOLVER_MESSAGE_ERROR error parsing received message: %1 - returning %2</term>
<listitem><para>
-The resolver received a NOTIFY message. As the server is not authoritative it
-cannot process it, so it returns an error message to the sender with the RCODE
-set to NOTAUTH.
+This is a debug message noting that parsing of the body of a received
+message by the resolver failed due to some error (although the parsing of
+the header succeeded). The message parameters give a textual description
+of the problem and the RCODE returned.
</para></listitem>
</varlistentry>
-<varlistentry id="RESOLVER_NORMQUERY">
-<term>RESOLVER_NORMQUERY processing normal query</term>
+<varlistentry id="RESOLVER_NEGATIVE_RETRIES">
+<term>RESOLVER_NEGATIVE_RETRIES negative number of retries (%1) specified in the configuration</term>
<listitem><para>
-The received query has passed all checks and is being processed by the resolver.
+This error is issued when a resolver configuration update has specified
+a negative retry count: only zero or positive values are valid. The
+configuration update was abandoned and the parameters were not changed.
</para></listitem>
</varlistentry>
-<varlistentry id="RESOLVER_NOROOTADDR">
-<term>RESOLVER_NOROOTADDR no root addresses available</term>
+<varlistentry id="RESOLVER_NON_IN_PACKET">
+<term>RESOLVER_NON_IN_PACKET non-IN class request received, returning REFUSED message</term>
<listitem><para>
-A warning message during startup, indicates that no root addresses have been
-set. This may be because the resolver will get them from a priming query.
+This debug message is issued when the resolver has received a DNS packet
+that was not of the IN (Internet) class. The resolver cannot handle such
+packets, so it returns a REFUSED response to the sender.
</para></listitem>
</varlistentry>
-<varlistentry id="RESOLVER_NOTIN">
-<term>RESOLVER_NOTIN non-IN class request received, returning REFUSED message</term>
+<varlistentry id="RESOLVER_NORMAL_QUERY">
+<term>RESOLVER_NORMAL_QUERY processing normal query</term>
<listitem><para>
-A debug message, the resolver has received a DNS packet that was not IN class.
-The resolver cannot handle such packets, so is returning a REFUSED response to
-the sender.
+This is a debug message indicating that the query received by the resolver
+has passed a set of checks (message is well-formed, it is allowed by the
+ACL, it is a supported opcode, etc.) and is being processed by the resolver.
</para></listitem>
</varlistentry>
-<varlistentry id="RESOLVER_NOTONEQUES">
-<term>RESOLVER_NOTONEQUES query contained %1 questions, exactly one question was expected</term>
+<varlistentry id="RESOLVER_NOTIFY_RECEIVED">
+<term>RESOLVER_NOTIFY_RECEIVED NOTIFY arrived but server is not authoritative</term>
<listitem><para>
-A debug message, the resolver received a query that contained the number of
-entires in the question section detailed in the message. This is a malformed
-message, as a DNS query must contain only one question. The resolver will
-return a message to the sender with the RCODE set to FORMERR.
+The resolver has received a NOTIFY message. As the server is not
+authoritative it cannot process it, so it returns an error message to
+the sender with the RCODE set to NOTAUTH.
</para></listitem>
</varlistentry>
-<varlistentry id="RESOLVER_OPCODEUNS">
-<term>RESOLVER_OPCODEUNS opcode %1 not supported by the resolver</term>
+<varlistentry id="RESOLVER_NOT_ONE_QUESTION">
+<term>RESOLVER_NOT_ONE_QUESTION query contained %1 questions, exactly one question was expected</term>
<listitem><para>
-A debug message, the resolver received a message with an unsupported opcode
-(it can only process QUERY opcodes). It will return a message to the sender
-with the RCODE set to NOTIMP.
+This debug message indicates that the resolver received a query that
+contained the number of entries in the question section detailed in
+the message. This is a malformed message, as a DNS query must contain
+only one question. The resolver will return a message to the sender
+with the RCODE set to FORMERR.
</para></listitem>
</varlistentry>
-<varlistentry id="RESOLVER_PARSEERR">
-<term>RESOLVER_PARSEERR error parsing received message: %1 - returning %2</term>
+<varlistentry id="RESOLVER_NO_ROOT_ADDRESS">
+<term>RESOLVER_NO_ROOT_ADDRESS no root addresses available</term>
<listitem><para>
-A debug message noting that the resolver received a message and the parsing
-of the body of the message failed due to some non-protocol related reason
-(although the parsing of the header succeeded). The message parameters give
-a textual description of the problem and the RCODE returned.
+A warning message issued during resolver startup, this indicates that
+no root addresses have been set. This may be because the resolver will
+get them from a priming query.
</para></listitem>
</varlistentry>
-<varlistentry id="RESOLVER_PRINTMSG">
-<term>RESOLVER_PRINTMSG print message command, aeguments are: %1</term>
+<varlistentry id="RESOLVER_PARSE_ERROR">
+<term>RESOLVER_PARSE_ERROR error parsing received message: %1 - returning %2</term>
<listitem><para>
-This message is logged when a "print_message" command is received over the
-command channel.
+This is a debug message noting that the resolver received a message and
+the parsing of the body of the message failed due to some non-protocol
+related reason (although the parsing of the header succeeded).
+The message parameters give a textual description of the problem and
+the RCODE returned.
</para></listitem>
</varlistentry>
-<varlistentry id="RESOLVER_PROTERR">
-<term>RESOLVER_PROTERR protocol error parsing received message: %1 - returning %2</term>
+<varlistentry id="RESOLVER_PRINT_COMMAND">
+<term>RESOLVER_PRINT_COMMAND print message command, arguments are: %1</term>
<listitem><para>
-A debug message noting that the resolver received a message and the parsing
-of the body of the message failed due to some protocol error (although the
-parsing of the header succeeded). The message parameters give a textual
-description of the problem and the RCODE returned.
+This debug message is logged when a "print_message" command is received
+by the resolver over the command channel.
</para></listitem>
</varlistentry>
-<varlistentry id="RESOLVER_QUSETUP">
-<term>RESOLVER_QUSETUP query setup</term>
+<varlistentry id="RESOLVER_PROTOCOL_ERROR">
+<term>RESOLVER_PROTOCOL_ERROR protocol error parsing received message: %1 - returning %2</term>
<listitem><para>
-A debug message noting that the resolver is creating a RecursiveQuery object.
+This is a debug message noting that the resolver received a message and
+the parsing of the body of the message failed due to some protocol error
+(although the parsing of the header succeeded). The message parameters
+give a textual description of the problem and the RCODE returned.
</para></listitem>
</varlistentry>
-<varlistentry id="RESOLVER_QUSHUT">
-<term>RESOLVER_QUSHUT query shutdown</term>
+<varlistentry id="RESOLVER_QUERY_ACCEPTED">
+<term>RESOLVER_QUERY_ACCEPTED query accepted: '%1/%2/%3' from %4</term>
<listitem><para>
-A debug message noting that the resolver is destroying a RecursiveQuery object.
+This debug message is produced by the resolver when an incoming query
+is accepted by the query ACL. The log message shows the query in the
+form of <query name>/<query type>/<query class>, and the client that
+sent the query in the form of <source IP address>#<source port>.
</para></listitem>
</varlistentry>
-<varlistentry id="RESOLVER_QUTMOSMALL">
-<term>RESOLVER_QUTMOSMALL query timeout of %1 is too small</term>
+<varlistentry id="RESOLVER_QUERY_DROPPED">
+<term>RESOLVER_QUERY_DROPPED query dropped: '%1/%2/%3' from %4</term>
<listitem><para>
-An error indicating that the configuration value specified for the query
-timeout is too small.
+This is an informational message that indicates an incoming query has
+been dropped by the resolver because of the query ACL. Unlike the
+RESOLVER_QUERY_REJECTED case, the server does not return any response.
+The log message shows the query in the form of <query name>/<query
+type>/<query class>, and the client that sent the query in the form of
+<source IP address>#<source port>.
</para></listitem>
</varlistentry>
-<varlistentry id="RESOLVER_RECURSIVE">
-<term>RESOLVER_RECURSIVE running in recursive mode</term>
+<varlistentry id="RESOLVER_QUERY_REJECTED">
+<term>RESOLVER_QUERY_REJECTED query rejected: '%1/%2/%3' from %4</term>
<listitem><para>
-This is an informational message that appears at startup noting that the
-resolver is running in recursive mode.
+This is an informational message that indicates an incoming query has
+been rejected by the resolver because of the query ACL. This results
+in a response with an RCODE of REFUSED. The log message shows the query
+in the form of <query name>/<query type>/<query class>, and the client
+that sent the query in the form of <source IP address>#<source port>.
</para></listitem>
</varlistentry>
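The three messages above correspond to the possible outcomes of the query ACL
check: accept, drop and reject. As an illustrative sketch only (the key names
and action values are assumptions, not taken from this text), such an ACL
might be configured as an ordered list of rules, for example:

    "query_acl": [
        { "action": "ACCEPT", "from": "127.0.0.1" },
        { "action": "REJECT", "from": "192.0.2.0/24" },
        { "action": "DROP",   "from": "0.0.0.0/0" }
    ]

where the first rule matching the client address would decide whether the
query is accepted, rejected (REFUSED returned) or silently dropped.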
-<varlistentry id="RESOLVER_RECVMSG">
-<term>RESOLVER_RECVMSG resolver has received a DNS message</term>
+<varlistentry id="RESOLVER_QUERY_SETUP">
+<term>RESOLVER_QUERY_SETUP query setup</term>
<listitem><para>
-A debug message indicating that the resolver has received a message. Depending
-on the debug settings, subsequent log output will indicate the nature of the
-message.
+This is a debug message noting that the resolver is creating a
+RecursiveQuery object.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_QUERY_SHUTDOWN">
+<term>RESOLVER_QUERY_SHUTDOWN query shutdown</term>
+<listitem><para>
+This is a debug message noting that the resolver is destroying a
+RecursiveQuery object.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_QUERY_TIME_SMALL">
+<term>RESOLVER_QUERY_TIME_SMALL query timeout of %1 is too small</term>
+<listitem><para>
+During the update of the resolver's configuration parameters, the value
+of the query timeout was found to be too small. The configuration
+parameters were not changed.
</para></listitem>
</varlistentry>
-<varlistentry id="RESOLVER_RETRYNEG">
-<term>RESOLVER_RETRYNEG negative number of retries (%1) specified in the configuration</term>
+<varlistentry id="RESOLVER_RECEIVED_MESSAGE">
+<term>RESOLVER_RECEIVED_MESSAGE resolver has received a DNS message</term>
<listitem><para>
-An error message indicating that the resolver configuration has specified a
-negative retry count. Only zero or positive values are valid.
+This is a debug message indicating that the resolver has received a
+DNS message. Depending on the debug settings, subsequent log output
+will indicate the nature of the message.
</para></listitem>
</varlistentry>
-<varlistentry id="RESOLVER_ROOTADDR">
-<term>RESOLVER_ROOTADDR setting root address %1(%2)</term>
+<varlistentry id="RESOLVER_RECURSIVE">
+<term>RESOLVER_RECURSIVE running in recursive mode</term>
<listitem><para>
-This message may appear multiple times during startup; it lists the root
-addresses used by the resolver.
+This is an informational message that appears at startup noting that
+the resolver is running in recursive mode.
</para></listitem>
</varlistentry>
-<varlistentry id="RESOLVER_SERVICE">
-<term>RESOLVER_SERVICE service object created</term>
+<varlistentry id="RESOLVER_SERVICE_CREATED">
+<term>RESOLVER_SERVICE_CREATED service object created</term>
<listitem><para>
-A debug message, output when the main service object (which handles the
-received queries) is created.
+This debug message is output when the resolver creates the main service
+object (which handles the received queries).
</para></listitem>
</varlistentry>
-<varlistentry id="RESOLVER_SETPARAM">
-<term>RESOLVER_SETPARAM query timeout: %1, client timeout: %2, lookup timeout: %3, retry count: %4</term>
+<varlistentry id="RESOLVER_SET_PARAMS">
+<term>RESOLVER_SET_PARAMS query timeout: %1, client timeout: %2, lookup timeout: %3, retry count: %4</term>
<listitem><para>
-A debug message, lists the parameters associated with the message. These are:
+This debug message lists the parameters being set for the resolver. These are:
query timeout: the timeout (in ms) used for queries originated by the resolver
-to upstream servers. Client timeout: the interval to resolver a query by
+to upstream servers. Client timeout: the interval to resolve a query by
a client: after this time, the resolver sends back a SERVFAIL to the client
-whilst continuing to resolver the query. Lookup timeout: the time at which the
+whilst continuing to resolve the query. Lookup timeout: the time at which the
resolver gives up trying to resolve a query. Retry count: the number of times
the resolver will retry a query to an upstream server if it gets a timeout.
</para><para>
The client and lookup timeouts require a bit more explanation. The
-resolution of the clent query might require a large number of queries to
+resolution of the client query might require a large number of queries to
upstream nameservers. Even if none of these queries timeout, the total time
taken to perform all the queries may exceed the client timeout. When this
happens, a SERVFAIL is returned to the client, but the resolver continues
-with the resolution process. Data received is added to the cache. However,
-there comes a time - the lookup timeout - when even the resolve gives up.
+with the resolution process; data received is added to the cache. However,
+there comes a time - the lookup timeout - when even the resolver gives up.
At this point it will wait for pending upstream queries to complete or
timeout and drop the query.
</para></listitem>
</varlistentry>
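To make the relationship between these four parameters concrete, the following
is a sketch of a set of values as they might be reported by this message (the
item names are assumptions, and the millisecond unit is only stated above for
the query timeout):

    {
        "timeout_query": 2000,
        "timeout_client": 4000,
        "timeout_lookup": 30000,
        "retries": 3
    }

With these values a client would receive a SERVFAIL after at most 4 seconds,
while the resolver would keep working on (and caching) the answer for up to
30 seconds, retrying each timed-out upstream query up to 3 times.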
+<varlistentry id="RESOLVER_SET_QUERY_ACL">
+<term>RESOLVER_SET_QUERY_ACL query ACL is configured</term>
+<listitem><para>
+This debug message is generated when a new query ACL is configured for
+the resolver.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_SET_ROOT_ADDRESS">
+<term>RESOLVER_SET_ROOT_ADDRESS setting root address %1(%2)</term>
+<listitem><para>
+This message gives the address of one of the root servers used by the
+resolver. It is output during startup and may appear multiple times,
+once for each root server address.
+</para></listitem>
+</varlistentry>
+
<varlistentry id="RESOLVER_SHUTDOWN">
<term>RESOLVER_SHUTDOWN resolver shutdown complete</term>
<listitem><para>
-This information message is output when the resolver has shut down.
+This informational message is output when the resolver has shut down.
</para></listitem>
</varlistentry>
@@ -2005,11 +3820,982 @@ An informational message, this is output when the resolver starts up.
</para></listitem>
</varlistentry>
-<varlistentry id="RESOLVER_UNEXRESP">
-<term>RESOLVER_UNEXRESP received unexpected response, ignoring</term>
+<varlistentry id="RESOLVER_UNEXPECTED_RESPONSE">
+<term>RESOLVER_UNEXPECTED_RESPONSE received unexpected response, ignoring</term>
+<listitem><para>
+This is a debug message noting that the resolver received a DNS response
+packet on the port on which it is listening for queries. The packet
+has been ignored.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_UNSUPPORTED_OPCODE">
+<term>RESOLVER_UNSUPPORTED_OPCODE opcode %1 not supported by the resolver</term>
+<listitem><para>
+This is a debug message output when the resolver received a message with an
+unsupported opcode (it can only process QUERY opcodes). It will return
+a message to the sender with the RCODE set to NOTIMP.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="SRVCOMM_ADDRESSES_NOT_LIST">
+<term>SRVCOMM_ADDRESSES_NOT_LIST the address and port specification is not a list in %1</term>
+<listitem><para>
+This points to an error in the configuration. What was supposed to be a list
+of IP address/port pairs is not a list at all but something else.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="SRVCOMM_ADDRESS_FAIL">
+<term>SRVCOMM_ADDRESS_FAIL failed to listen on addresses (%1)</term>
+<listitem><para>
+The server failed to bind to one of the address/port pairs specified in the
+configuration, for the reason listed in the message (usually because the pair
+is already in use by another service or because of missing privileges). The
+server will try to recover by binding to the address/port pairs it was
+listening on before (if any).
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="SRVCOMM_ADDRESS_MISSING">
+<term>SRVCOMM_ADDRESS_MISSING address specification is missing "address" or "port" element in %1</term>
+<listitem><para>
+This points to an error in configuration. An address specification in the
+configuration is missing either an address or port and so cannot be used. The
+specification causing the error is given in the message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="SRVCOMM_ADDRESS_TYPE">
+<term>SRVCOMM_ADDRESS_TYPE address specification type is invalid in %1</term>
+<listitem><para>
+This points to an error in the configuration. An address specification in
+the configuration is malformed. The specification causing the error is given
+in the message. A valid specification contains an address part (which must be
+a string representing a valid IPv4 or IPv6 address) and a port (which must be
+an integer in the range valid for TCP/UDP ports on your system).
+</para></listitem>
+</varlistentry>
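For comparison, a well-formed specification of the kind described above would
be a list of maps, each holding an "address" string with a valid IPv4 or IPv6
address and an integer "port", for example (values are illustrative only):

    [
        { "address": "0.0.0.0", "port": 53 },
        { "address": "::", "port": 53 }
    ]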
+
+<varlistentry id="SRVCOMM_ADDRESS_UNRECOVERABLE">
+<term>SRVCOMM_ADDRESS_UNRECOVERABLE failed to recover original addresses also (%2)</term>
+<listitem><para>
+The recovery of old addresses after SRVCOMM_ADDRESS_FAIL also failed for
+the reason listed.
+</para><para>
+The condition indicates problems with the server and/or the system on
+which it is running. The server will continue running to allow
+reconfiguration, but will not be listening on any address or port until
+an administrator reconfigures it to do so.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="SRVCOMM_ADDRESS_VALUE">
+<term>SRVCOMM_ADDRESS_VALUE address to set: %1#%2</term>
+<listitem><para>
+Debug message. This lists one address and port value of the set of
+addresses the server is going to listen on (there will be one log message
+per pair). This appears only after SRVCOMM_SET_LISTEN, but might be
+hidden, as it has a higher debug level.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="SRVCOMM_KEYS_DEINIT">
+<term>SRVCOMM_KEYS_DEINIT deinitializing TSIG keyring</term>
+<listitem><para>
+Debug message indicating that the server is deinitializing the TSIG keyring.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="SRVCOMM_KEYS_INIT">
+<term>SRVCOMM_KEYS_INIT initializing TSIG keyring</term>
+<listitem><para>
+Debug message indicating that the server is initializing the global TSIG
+keyring. This should be seen only at server start.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="SRVCOMM_KEYS_UPDATE">
+<term>SRVCOMM_KEYS_UPDATE updating TSIG keyring</term>
+<listitem><para>
+Debug message indicating that a new keyring is being loaded from the
+configuration (either on startup or as a result of a configuration update).
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="SRVCOMM_PORT_RANGE">
+<term>SRVCOMM_PORT_RANGE port out of valid range (%1 in %2)</term>
+<listitem><para>
+This points to an error in configuration. The port in an address
+specification is outside the valid range of 0 to 65535.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="SRVCOMM_SET_LISTEN">
+<term>SRVCOMM_SET_LISTEN setting addresses to listen to</term>
+<listitem><para>
+Debug message, noting that the server is about to start listening on a
+different set of IP addresses and ports than before.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATHTTPD_BAD_OPTION_VALUE">
+<term>STATHTTPD_BAD_OPTION_VALUE bad command line argument: %1</term>
+<listitem><para>
+The stats-httpd module was called with a bad command-line argument
+and will not start.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATHTTPD_CC_SESSION_ERROR">
+<term>STATHTTPD_CC_SESSION_ERROR error connecting to message bus: %1</term>
+<listitem><para>
+The stats-httpd module was unable to connect to the BIND 10 command
+and control bus. A likely problem is that the message bus daemon
+(b10-msgq) is not running. The stats-httpd module will now shut down.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATHTTPD_CLOSING">
+<term>STATHTTPD_CLOSING closing %1#%2</term>
+<listitem><para>
+The stats-httpd daemon will stop listening for requests on the given
+address and port number.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATHTTPD_CLOSING_CC_SESSION">
+<term>STATHTTPD_CLOSING_CC_SESSION stopping cc session</term>
+<listitem><para>
+Debug message indicating that the stats-httpd module is disconnecting
+from the command and control bus.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATHTTPD_HANDLE_CONFIG">
+<term>STATHTTPD_HANDLE_CONFIG reading configuration: %1</term>
+<listitem><para>
+The stats-httpd daemon has received new configuration data and will now
+process it. The (changed) data is printed.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATHTTPD_RECEIVED_SHUTDOWN_COMMAND">
+<term>STATHTTPD_RECEIVED_SHUTDOWN_COMMAND shutdown command received</term>
+<listitem><para>
+A shutdown command was sent to the stats-httpd module, and it will
+now shut down.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATHTTPD_RECEIVED_STATUS_COMMAND">
+<term>STATHTTPD_RECEIVED_STATUS_COMMAND received command to return status</term>
+<listitem><para>
+A status command was sent to the stats-httpd module, and it will
+respond with 'Stats Httpd is up.' and its PID.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATHTTPD_RECEIVED_UNKNOWN_COMMAND">
+<term>STATHTTPD_RECEIVED_UNKNOWN_COMMAND received unknown command: %1</term>
+<listitem><para>
+An unknown command has been sent to the stats-httpd module. The
+stats-httpd module will respond with an error, and the command will
+be ignored.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATHTTPD_SERVER_ERROR">
+<term>STATHTTPD_SERVER_ERROR HTTP server error: %1</term>
+<listitem><para>
+An internal error occurred while handling an HTTP request. An HTTP 500
+response will be sent back, and the specific error is printed. This
+is an error condition that likely points to a module that is not
+responding correctly to statistic requests.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATHTTPD_SERVER_INIT_ERROR">
+<term>STATHTTPD_SERVER_INIT_ERROR HTTP server initialization error: %1</term>
+<listitem><para>
+There was a problem initializing the HTTP server in the stats-httpd
+module upon receiving its configuration data. The most likely cause
+is a port binding problem or a bad configuration value. The specific
+error is printed in the message. The new configuration is ignored,
+and an error is sent back.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATHTTPD_SHUTDOWN">
+<term>STATHTTPD_SHUTDOWN shutting down</term>
+<listitem><para>
+The stats-httpd daemon is shutting down.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATHTTPD_STARTED">
+<term>STATHTTPD_STARTED listening on %1#%2</term>
+<listitem><para>
+The stats-httpd daemon will now start listening for requests on the
+given address and port number.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATHTTPD_STARTING_CC_SESSION">
+<term>STATHTTPD_STARTING_CC_SESSION starting cc session</term>
+<listitem><para>
+Debug message indicating that the stats-httpd module is connecting to
+the command and control bus.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATHTTPD_START_SERVER_INIT_ERROR">
+<term>STATHTTPD_START_SERVER_INIT_ERROR HTTP server initialization error: %1</term>
+<listitem><para>
+There was a problem initializing the HTTP server in the stats-httpd
+module upon startup. The most likely cause is that it was not able
+to bind to the listening port. The specific error is printed, and the
+module will shut down.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATHTTPD_STOPPED_BY_KEYBOARD">
+<term>STATHTTPD_STOPPED_BY_KEYBOARD keyboard interrupt, shutting down</term>
+<listitem><para>
+There was a keyboard interrupt signal to stop the stats-httpd
+daemon. The daemon will now shut down.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATHTTPD_UNKNOWN_CONFIG_ITEM">
+<term>STATHTTPD_UNKNOWN_CONFIG_ITEM unknown configuration item: %1</term>
+<listitem><para>
+The stats-httpd daemon received a configuration update from the
+configuration manager. However, one of the items in the
+configuration is unknown. The new configuration is ignored, and an
+error is sent back. A possible cause is that there was an upgrade
+problem, and the stats-httpd version is out of sync with the rest of
+the system.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATS_BAD_OPTION_VALUE">
+<term>STATS_BAD_OPTION_VALUE bad command line argument: %1</term>
+<listitem><para>
+The stats module was called with a bad command-line argument and will
+not start.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATS_CC_SESSION_ERROR">
+<term>STATS_CC_SESSION_ERROR error connecting to message bus: %1</term>
+<listitem><para>
+The stats module was unable to connect to the BIND 10 command and
+control bus. A likely problem is that the message bus daemon
+(b10-msgq) is not running. The stats module will now shut down.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATS_RECEIVED_NEW_CONFIG">
+<term>STATS_RECEIVED_NEW_CONFIG received new configuration: %1</term>
+<listitem><para>
+This debug message is printed when the stats module has received a
+configuration update from the configuration manager.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATS_RECEIVED_REMOVE_COMMAND">
+<term>STATS_RECEIVED_REMOVE_COMMAND received command to remove %1</term>
+<listitem><para>
+A remove command for the given name was sent to the stats module, and
+the given statistics value will now be removed. It will not appear in
+statistics reports until it appears in a statistics update from a
+module again.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATS_RECEIVED_RESET_COMMAND">
+<term>STATS_RECEIVED_RESET_COMMAND received command to reset all statistics</term>
+<listitem><para>
+The stats module received a command to clear all collected statistics.
+The data is cleared until it receives an update from the modules again.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATS_RECEIVED_SHOW_ALL_COMMAND">
+<term>STATS_RECEIVED_SHOW_ALL_COMMAND received command to show all statistics</term>
+<listitem><para>
+The stats module received a command to show all statistics that it has
+collected.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATS_RECEIVED_SHOW_NAME_COMMAND">
+<term>STATS_RECEIVED_SHOW_NAME_COMMAND received command to show statistics for %1</term>
+<listitem><para>
+The stats module received a command to show the statistics that it has
+collected for the given item.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATS_RECEIVED_SHUTDOWN_COMMAND">
+<term>STATS_RECEIVED_SHUTDOWN_COMMAND shutdown command received</term>
+<listitem><para>
+A shutdown command was sent to the stats module and it will now shut down.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATS_RECEIVED_STATUS_COMMAND">
+<term>STATS_RECEIVED_STATUS_COMMAND received command to return status</term>
+<listitem><para>
+A status command was sent to the stats module. It will return a
+response indicating that it is running normally.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATS_RECEIVED_UNKNOWN_COMMAND">
+<term>STATS_RECEIVED_UNKNOWN_COMMAND received unknown command: %1</term>
+<listitem><para>
+An unknown command has been sent to the stats module. The stats module
+will respond with an error and the command will be ignored.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATS_SEND_REQUEST_BOSS">
+<term>STATS_SEND_REQUEST_BOSS requesting boss to send statistics</term>
+<listitem><para>
+This debug message is printed when a request is sent to the boss module
+to send its data to the stats module.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATS_STOPPED_BY_KEYBOARD">
+<term>STATS_STOPPED_BY_KEYBOARD keyboard interrupt, shutting down</term>
+<listitem><para>
+There was a keyboard interrupt signal to stop the stats module. The
+daemon will now shut down.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATS_UNKNOWN_COMMAND_IN_SPEC">
+<term>STATS_UNKNOWN_COMMAND_IN_SPEC unknown command in specification file: %1</term>
+<listitem><para>
+The specification file for the stats module contains a command that
+is unknown in the implementation. The most likely cause is an
+installation problem, where the specification file stats.spec is
+from a different version of BIND 10 than the stats module itself.
+Please check your installation.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFRIN_AXFR_DATABASE_FAILURE">
+<term>XFRIN_AXFR_DATABASE_FAILURE AXFR transfer of zone %1 failed: %2</term>
+<listitem><para>
+The AXFR transfer for the given zone has failed due to a database problem.
+The error is shown in the log message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFRIN_AXFR_INTERNAL_FAILURE">
+<term>XFRIN_AXFR_INTERNAL_FAILURE AXFR transfer of zone %1 failed: %2</term>
+<listitem><para>
+The AXFR transfer for the given zone has failed due to an internal
+problem in the bind10 python wrapper library.
+The error is shown in the log message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFRIN_AXFR_TRANSFER_FAILURE">
+<term>XFRIN_AXFR_TRANSFER_FAILURE AXFR transfer of zone %1 failed: %2</term>
+<listitem><para>
+The AXFR transfer for the given zone has failed due to a protocol error.
+The error is shown in the log message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFRIN_AXFR_TRANSFER_STARTED">
+<term>XFRIN_AXFR_TRANSFER_STARTED AXFR transfer of zone %1 started</term>
+<listitem><para>
+A connection to the master server has been made, the serial value in
+the SOA record has been checked, and a zone transfer has been started.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFRIN_AXFR_TRANSFER_SUCCESS">
+<term>XFRIN_AXFR_TRANSFER_SUCCESS AXFR transfer of zone %1 succeeded</term>
+<listitem><para>
+The AXFR transfer of the given zone was successfully completed.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFRIN_BAD_MASTER_ADDR_FORMAT">
+<term>XFRIN_BAD_MASTER_ADDR_FORMAT bad format for master address: %1</term>
+<listitem><para>
+The given master address is not a valid IP address.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFRIN_BAD_MASTER_PORT_FORMAT">
+<term>XFRIN_BAD_MASTER_PORT_FORMAT bad format for master port: %1</term>
+<listitem><para>
+The master port as read from the configuration is not a valid port number.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFRIN_BAD_TSIG_KEY_STRING">
+<term>XFRIN_BAD_TSIG_KEY_STRING bad TSIG key string: %1</term>
+<listitem><para>
+The TSIG key string as read from the configuration does not represent
+a valid TSIG key.
+</para></listitem>
+</varlistentry>
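As a rough illustration (the exact key-string syntax is assumed here rather
than defined by this text), a TSIG key string generally takes the form
name:base64-secret with an optional trailing algorithm name, for example:

    "tsig.example.:SGVsbG8gd29ybGQ=:hmac-sha256"

A string that does not follow this pattern, or whose secret part is not valid
base64, could trigger this message.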
+
+<varlistentry id="XFRIN_BAD_ZONE_CLASS">
+<term>XFRIN_BAD_ZONE_CLASS Invalid zone class: %1</term>
+<listitem><para>
+The zone class as read from the configuration is not a valid DNS class.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFRIN_CC_SESSION_ERROR">
+<term>XFRIN_CC_SESSION_ERROR error reading from cc channel: %1</term>
+<listitem><para>
+There was a problem reading from the command and control channel. The
+most likely cause is that the msgq daemon is not running.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFRIN_COMMAND_ERROR">
+<term>XFRIN_COMMAND_ERROR error while executing command '%1': %2</term>
+<listitem><para>
+There was an error while the given command was being processed. The
+error is given in the log message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFRIN_CONNECT_MASTER">
+<term>XFRIN_CONNECT_MASTER error connecting to master at %1: %2</term>
+<listitem><para>
+There was an error opening a connection to the master. The error is
+shown in the log message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFRIN_IMPORT_DNS">
+<term>XFRIN_IMPORT_DNS error importing python DNS module: %1</term>
+<listitem><para>
+There was an error importing the python DNS module pydnspp. The most
+likely cause is a PYTHONPATH problem.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFRIN_MSGQ_SEND_ERROR">
+<term>XFRIN_MSGQ_SEND_ERROR error while contacting %1 and %2</term>
+<listitem><para>
+There was a problem sending a message to the xfrout module or the
+zone manager. This most likely means that the msgq daemon has quit or
+was killed.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFRIN_MSGQ_SEND_ERROR_ZONE_MANAGER">
+<term>XFRIN_MSGQ_SEND_ERROR_ZONE_MANAGER error while contacting %1</term>
+<listitem><para>
+There was a problem sending a message to the zone manager. This most
+likely means that the msgq daemon has quit or was killed.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFRIN_RETRANSFER_UNKNOWN_ZONE">
+<term>XFRIN_RETRANSFER_UNKNOWN_ZONE got notification to retransfer unknown zone %1</term>
+<listitem><para>
+There was an internal command to retransfer the given zone, but the
+zone is not known to the system. This may indicate that the configuration
+for xfrin is incomplete, or there was a typographical error in the
+zone name in the configuration.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFRIN_STARTING">
+<term>XFRIN_STARTING starting resolver with command line '%1'</term>
+<listitem><para>
+An informational message, this is output when the resolver starts up.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFRIN_STOPPED_BY_KEYBOARD">
+<term>XFRIN_STOPPED_BY_KEYBOARD keyboard interrupt, shutting down</term>
+<listitem><para>
+There was a keyboard interrupt signal to stop the xfrin daemon. The
+daemon will now shut down.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFRIN_UNKNOWN_ERROR">
+<term>XFRIN_UNKNOWN_ERROR unknown error: %1</term>
+<listitem><para>
+An uncaught exception was raised while running the xfrin daemon. The
+exception message is printed in the log message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_AXFR_TRANSFER_DONE">
+<term>XFROUT_AXFR_TRANSFER_DONE transfer of %1/%2 complete</term>
+<listitem><para>
+The transfer of the given zone has been completed successfully, or was
+aborted due to a shutdown event.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_AXFR_TRANSFER_ERROR">
+<term>XFROUT_AXFR_TRANSFER_ERROR error transferring zone %1/%2: %3</term>
+<listitem><para>
+An uncaught exception was encountered while sending the response to
+an AXFR query. The error message of the exception is included in the
+log message, but this error most likely points to incomplete exception
+handling in the code.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_AXFR_TRANSFER_FAILED">
+<term>XFROUT_AXFR_TRANSFER_FAILED transfer of %1/%2 failed, rcode: %3</term>
+<listitem><para>
+A transfer out for the given zone failed. An error response is sent
+to the client. The given rcode is the rcode that is set in the error
+response. This is either NOTAUTH (we are not authoritative for the
+zone), SERVFAIL (our internal database is missing the SOA record for
+the zone), or REFUSED (the limit of simultaneous outgoing AXFR
+transfers, as specified by the configuration value
+Xfrout/max_transfers_out, has been reached).
+</para></listitem>
+</varlistentry>
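Of the three conditions above, only the REFUSED case is directly tunable by
an operator, through the Xfrout/max_transfers_out value named in the
description. A sketch of the relevant configuration fragment (the surrounding
structure is an assumption) would be:

    "Xfrout": { "max_transfers_out": 10 }

Raising this value allows more simultaneous outgoing transfers before further
clients receive REFUSED.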
+
+<varlistentry id="XFROUT_AXFR_TRANSFER_STARTED">
+<term>XFROUT_AXFR_TRANSFER_STARTED transfer of zone %1/%2 has started</term>
+<listitem><para>
+A transfer out of the given zone has started.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_BAD_TSIG_KEY_STRING">
+<term>XFROUT_BAD_TSIG_KEY_STRING bad TSIG key string: %1</term>
+<listitem><para>
+The TSIG key string as read from the configuration does not represent
+a valid TSIG key.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_CC_SESSION_ERROR">
+<term>XFROUT_CC_SESSION_ERROR error reading from cc channel: %1</term>
+<listitem><para>
+There was a problem reading from the command and control channel. The
+most likely cause is that the msgq daemon is not running.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_CC_SESSION_TIMEOUT_ERROR">
+<term>XFROUT_CC_SESSION_TIMEOUT_ERROR timeout waiting for cc response</term>
+<listitem><para>
+There was a problem reading a response from another module over the
+command and control channel. The most likely cause is that the
+configuration manager b10-cfgmgr is not running.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_FETCH_REQUEST_ERROR">
+<term>XFROUT_FETCH_REQUEST_ERROR socket error while fetching a request from the auth daemon</term>
+<listitem><para>
+There was a socket error while contacting the b10-auth daemon to
+fetch a transfer request. The auth daemon may have shut down.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_HANDLE_QUERY_ERROR">
+<term>XFROUT_HANDLE_QUERY_ERROR error while handling query: %1</term>
+<listitem><para>
+There was a general error handling an xfrout query. The error is shown
+in the message. In principle this error should not appear, and it points
+to an oversight in catching exceptions in the right place. However, to
+ensure the daemon keeps running, this error is caught and reported.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_IMPORT">
+<term>XFROUT_IMPORT error importing python module: %1</term>
+<listitem><para>
+There was an error importing a python module. One of the modules needed
+by xfrout could not be found. This suggests that either some libraries
+are missing on the system, or the PYTHONPATH variable is not correct.
+The specific place where this library needs to be depends on your
+system and your specific installation.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_NEW_CONFIG">
+<term>XFROUT_NEW_CONFIG Update xfrout configuration</term>
+<listitem><para>
+New configuration settings have been sent from the configuration
+manager. The xfrout daemon will now apply them.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_NEW_CONFIG_DONE">
+<term>XFROUT_NEW_CONFIG_DONE Update xfrout configuration done</term>
+<listitem><para>
+The xfrout daemon is now done reading the new configuration settings
+received from the configuration manager.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_NOTIFY_COMMAND">
+<term>XFROUT_NOTIFY_COMMAND received command to send notifies for %1/%2</term>
+<listitem><para>
+The xfrout daemon received a command on the command channel that
+NOTIFY packets should be sent for the given zone.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_PARSE_QUERY_ERROR">
+<term>XFROUT_PARSE_QUERY_ERROR error parsing query: %1</term>
+<listitem><para>
+There was a parse error while reading an incoming query. The parse
+error is shown in the log message. A remote client sent a packet we
+do not understand or support. The xfrout request will be ignored.
+In general, this should only occur for unexpected problems like
+memory allocation failures, as the query should already have been
+parsed by the b10-auth daemon, before it was passed here.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_PROCESS_REQUEST_ERROR">
+<term>XFROUT_PROCESS_REQUEST_ERROR error processing transfer request: %2</term>
+<listitem><para>
+There was an error processing a transfer request. The error is included
+in the log message, but at this point no specific information other
+than that could be given. This points to incomplete exception handling
+in the code.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_QUERY_DROPPED">
+<term>XFROUT_QUERY_DROPPED request to transfer %1/%2 to [%3]:%4 dropped</term>
+<listitem><para>
+The xfrout process silently dropped a request to transfer the zone to the
+given host, as required by the ACLs. The %1 and %2 represent the zone name
+and class; the %3 and %4 are the IP address and port of the peer requesting
+the transfer.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_QUERY_REJECTED">
+<term>XFROUT_QUERY_REJECTED request to transfer %1/%2 to [%3]:%4 rejected</term>
+<listitem><para>
+The xfrout process rejected (with the REFUSED rcode) a request to transfer
+the zone to the given host because of the ACLs. The %1 and %2 represent the
+zone name and class; the %3 and %4 are the IP address and port of the peer
+requesting the transfer.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_RECEIVED_SHUTDOWN_COMMAND">
+<term>XFROUT_RECEIVED_SHUTDOWN_COMMAND shutdown command received</term>
+<listitem><para>
+The xfrout daemon received a shutdown command from the command channel
+and will now shut down.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_RECEIVE_FILE_DESCRIPTOR_ERROR">
+<term>XFROUT_RECEIVE_FILE_DESCRIPTOR_ERROR error receiving the file descriptor for an XFR connection</term>
+<listitem><para>
+There was an error receiving the file descriptor for the transfer
+request. Normally, the request is received by b10-auth, and passed on
+to the xfrout daemon, so it can answer directly. However, there was a
+problem receiving this file descriptor. The request will be ignored.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_REMOVE_OLD_UNIX_SOCKET_FILE_ERROR">
+<term>XFROUT_REMOVE_OLD_UNIX_SOCKET_FILE_ERROR error removing unix socket file %1: %2</term>
+<listitem><para>
+The unix socket file that xfrout needs for contacting the auth daemon
+already exists and needs to be removed first, but there is a problem
+removing it. It is likely that the daemon does not have permission to
+remove this file. The specific error is shown in the log message. The
+xfrout daemon will shut down.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_REMOVE_UNIX_SOCKET_FILE_ERROR">
+<term>XFROUT_REMOVE_UNIX_SOCKET_FILE_ERROR error clearing unix socket file %1: %2</term>
+<listitem><para>
+When shutting down, the xfrout daemon tried to clear the unix socket
+file used for communication with the auth daemon. It failed to remove
+the file. The reason for the failure is given in the error message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_SOCKET_SELECT_ERROR">
+<term>XFROUT_SOCKET_SELECT_ERROR error while calling select() on request socket: %1</term>
+<listitem><para>
+There was an error while calling select() on the socket that informs
+the xfrout daemon that a new xfrout request has arrived. This should
+only be the result of a rare local error such as a memory allocation
+failure and shouldn't happen under normal conditions. The error is
+included in the log message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_STOPPED_BY_KEYBOARD">
+<term>XFROUT_STOPPED_BY_KEYBOARD keyboard interrupt, shutting down</term>
+<listitem><para>
+There was a keyboard interrupt signal to stop the xfrout daemon. The
+daemon will now shut down.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_STOPPING">
+<term>XFROUT_STOPPING the xfrout daemon is shutting down</term>
+<listitem><para>
+The current transfer is aborted, as the xfrout daemon is shutting down.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_UNIX_SOCKET_FILE_IN_USE">
+<term>XFROUT_UNIX_SOCKET_FILE_IN_USE another xfrout process seems to be using the unix socket file %1</term>
+<listitem><para>
+While starting up, the xfrout daemon tried to clear the unix domain
+socket needed for contacting the b10-auth daemon to pass requests
+on, but the file is in use. The most likely cause is that another
+xfrout daemon process is still running. This xfrout daemon (the one
+printing this message) will not start.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ZONEMGR_CCSESSION_ERROR">
+<term>ZONEMGR_CCSESSION_ERROR command channel session error: %1</term>
+<listitem><para>
+An error was encountered on the command channel. The message indicates
+the nature of the error.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ZONEMGR_JITTER_TOO_BIG">
+<term>ZONEMGR_JITTER_TOO_BIG refresh_jitter is too big, setting to 0.5</term>
+<listitem><para>
+The value specified in the configuration for the refresh jitter is too large
+so its value has been set to the maximum of 0.5.
+</para></listitem>
+</varlistentry>
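For reference, a sketch of a refresh_jitter setting that stays within the 0.5
cap mentioned above (the enclosing module name is an assumption) would be:

    "Zonemgr": { "refresh_jitter": 0.25 }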
+
+<varlistentry id="ZONEMGR_KEYBOARD_INTERRUPT">
+<term>ZONEMGR_KEYBOARD_INTERRUPT exiting zonemgr process as result of keyboard interrupt</term>
+<listitem><para>
+An informational message output when the zone manager was being run at a
+terminal and it was terminated via a keyboard interrupt signal.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ZONEMGR_LOAD_ZONE">
+<term>ZONEMGR_LOAD_ZONE loading zone %1 (class %2)</term>
+<listitem><para>
+This is a debug message indicating that the zone of the specified class
+is being loaded.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ZONEMGR_NO_MASTER_ADDRESS">
+<term>ZONEMGR_NO_MASTER_ADDRESS internal BIND 10 command did not contain address of master</term>
+<listitem><para>
+A command received by the zone manager from the Auth module did not
+contain the address of the master server from which a NOTIFY message
+was received. This may be due to an internal programming error; please
+submit a bug report.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ZONEMGR_NO_SOA">
+<term>ZONEMGR_NO_SOA zone %1 (class %2) does not have an SOA record</term>
+<listitem><para>
+When loading the named zone of the specified class the zone manager
+discovered that the data did not contain an SOA record. The load has
+been abandoned.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ZONEMGR_NO_TIMER_THREAD">
+<term>ZONEMGR_NO_TIMER_THREAD trying to stop zone timer thread but it is not running</term>
+<listitem><para>
+An attempt was made to stop the timer thread (used to track when zones
+should be refreshed) but it was not running. This may indicate an
+internal program error. Please submit a bug report.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ZONEMGR_NO_ZONE_CLASS">
+<term>ZONEMGR_NO_ZONE_CLASS internal BIND 10 command did not contain class of zone</term>
+<listitem><para>
+A command received by the zone manager from another BIND 10 module did
+not contain the class of the zone on which the zone manager should act.
+This may be due to an internal programming error; please submit a
+bug report.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ZONEMGR_NO_ZONE_NAME">
+<term>ZONEMGR_NO_ZONE_NAME internal BIND 10 command did not contain name of zone</term>
+<listitem><para>
+A command received by the zone manager from another BIND 10 module did
+not contain the name of the zone on which the zone manager should act.
+This may be due to an internal programming error; please submit a
+bug report.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ZONEMGR_RECEIVE_NOTIFY">
+<term>ZONEMGR_RECEIVE_NOTIFY received NOTIFY command for zone %1 (class %2)</term>
+<listitem><para>
+This is a debug message indicating that the zone manager has received a
+NOTIFY command over the command channel. The command is sent by the Auth
+process when it is acting as a slave server for the zone and causes the
+zone manager to record the master server for the zone and start a timer;
+when the timer expires, the master will be polled to see if it contains
+new data.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ZONEMGR_RECEIVE_SHUTDOWN">
+<term>ZONEMGR_RECEIVE_SHUTDOWN received SHUTDOWN command</term>
+<listitem><para>
+This is a debug message indicating that the zone manager has received
+a SHUTDOWN command over the command channel from the Boss process.
+It will act on this command and shut down.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ZONEMGR_RECEIVE_UNKNOWN">
+<term>ZONEMGR_RECEIVE_UNKNOWN received unknown command '%1'</term>
+<listitem><para>
+This is a warning message indicating that the zone manager has received
+the stated command over the command channel. The command is not known
+to the zone manager and although the command is ignored, its receipt
+may indicate an internal error. Please submit a bug report.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ZONEMGR_RECEIVE_XFRIN_FAILED">
+<term>ZONEMGR_RECEIVE_XFRIN_FAILED received XFRIN FAILED command for zone %1 (class %2)</term>
+<listitem><para>
+This is a debug message indicating that the zone manager has received
+an XFRIN FAILED command over the command channel. The command is sent
+by the Xfrin process when a transfer of zone data into the system has
+failed, and causes the zone manager to schedule another transfer attempt.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ZONEMGR_RECEIVE_XFRIN_SUCCESS">
+<term>ZONEMGR_RECEIVE_XFRIN_SUCCESS received XFRIN SUCCESS command for zone %1 (class %2)</term>
+<listitem><para>
+This is a debug message indicating that the zone manager has received
+an XFRIN SUCCESS command over the command channel. The command is sent
+by the Xfrin process when the transfer of zone data into the system has
+succeeded, and causes the data to be loaded and served by BIND 10.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ZONEMGR_REFRESH_ZONE">
+<term>ZONEMGR_REFRESH_ZONE refreshing zone %1 (class %2)</term>
+<listitem><para>
+The zone manager is refreshing the named zone of the specified class
+with updated information.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ZONEMGR_SELECT_ERROR">
+<term>ZONEMGR_SELECT_ERROR error with select(): %1</term>
+<listitem><para>
+An attempt to wait for input from a socket failed. The failing operation
+is a call to the operating system's select() function, which failed for
+the given reason.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ZONEMGR_SEND_FAIL">
+<term>ZONEMGR_SEND_FAIL failed to send command to %1, session has been closed</term>
+<listitem><para>
+The zone manager attempted to send a command to the named BIND 10 module,
+but the send failed. The session between the modules has been closed.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ZONEMGR_SESSION_ERROR">
+<term>ZONEMGR_SESSION_ERROR unable to establish session to command channel daemon</term>
+<listitem><para>
+The zonemgr process could not be started because it was unable to
+connect to the command channel daemon. The most likely cause of this
+problem is that the daemon is not running.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ZONEMGR_SESSION_TIMEOUT">
+<term>ZONEMGR_SESSION_TIMEOUT timeout on session to command channel daemon</term>
+<listitem><para>
+The zonemgr process could not be started because it timed out when
+connecting to the command channel daemon. The most likely cause of this
+problem is that the daemon is not running.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ZONEMGR_SHUTDOWN">
+<term>ZONEMGR_SHUTDOWN zone manager has shut down</term>
+<listitem><para>
+A debug message, output when the zone manager has shut down completely.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ZONEMGR_STARTING">
+<term>ZONEMGR_STARTING zone manager starting</term>
+<listitem><para>
+A debug message output when the zone manager starts up.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ZONEMGR_TIMER_THREAD_RUNNING">
+<term>ZONEMGR_TIMER_THREAD_RUNNING trying to start timer thread but one is already running</term>
+<listitem><para>
+This message is issued when an attempt is made to start the timer
+thread (which keeps track of when zones need a refresh) but one is
+already running. It indicates either an error in the program logic or
+a problem with stopping a previous instance of the timer. Please submit
+a bug report.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ZONEMGR_UNKNOWN_ZONE_FAIL">
+<term>ZONEMGR_UNKNOWN_ZONE_FAIL zone %1 (class %2) is not known to the zone manager</term>
+<listitem><para>
+An XFRIN operation has failed but the zone that was the subject of the
+operation is not being managed by the zone manager. This may indicate
+an error in the program (as the operation should not have been initiated
+if this were the case). Please submit a bug report.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ZONEMGR_UNKNOWN_ZONE_NOTIFIED">
+<term>ZONEMGR_UNKNOWN_ZONE_NOTIFIED notified zone %1 (class %2) is not known to the zone manager</term>
+<listitem><para>
+A NOTIFY was received but the zone that was the subject of the operation
+is not being managed by the zone manager. This may indicate an error
+in the program (as the operation should not have been initiated if this
+were the case). Please submit a bug report.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ZONEMGR_UNKNOWN_ZONE_SUCCESS">
+<term>ZONEMGR_UNKNOWN_ZONE_SUCCESS zone %1 (class %2) is not known to the zone manager</term>
<listitem><para>
-A debug message noting that the server has received a response instead of a
-query and is ignoring it.
+An XFRIN operation has succeeded but the zone received is not being
+managed by the zone manager. This may indicate an error in the program
+(as the operation should not have been initiated if this were the case).
+Please submit a bug report.
</para></listitem>
</varlistentry>
</variablelist>
diff --git a/src/bin/auth/auth.spec.pre.in b/src/bin/auth/auth.spec.pre.in
index d88ffb5..2ce044e 100644
--- a/src/bin/auth/auth.spec.pre.in
+++ b/src/bin/auth/auth.spec.pre.in
@@ -122,6 +122,24 @@
}
]
}
+ ],
+ "statistics": [
+ {
+ "item_name": "queries.tcp",
+ "item_type": "integer",
+ "item_optional": false,
+ "item_default": 0,
+ "item_title": "Queries TCP ",
+ "item_description": "A number of total query counts which all auth servers receive over TCP since they started initially"
+ },
+ {
+ "item_name": "queries.udp",
+ "item_type": "integer",
+ "item_optional": false,
+ "item_default": 0,
+ "item_title": "Queries UDP",
+ "item_description": "A number of total query counts which all auth servers receive over UDP since they started initially"
+ }
]
}
}
diff --git a/src/bin/auth/b10-auth.8 b/src/bin/auth/b10-auth.8
index 0356683..aedadee 100644
--- a/src/bin/auth/b10-auth.8
+++ b/src/bin/auth/b10-auth.8
@@ -2,12 +2,12 @@
.\" Title: b10-auth
.\" Author: [FIXME: author] [see http://docbook.sf.net/el/author]
.\" Generator: DocBook XSL Stylesheets v1.75.2 <http://docbook.sf.net/>
-.\" Date: March 8, 2011
+.\" Date: August 11, 2011
.\" Manual: BIND10
.\" Source: BIND10
.\" Language: English
.\"
-.TH "B10\-AUTH" "8" "March 8, 2011" "BIND10" "BIND10"
+.TH "B10\-AUTH" "8" "August 11, 2011" "BIND10" "BIND10"
.\" -----------------------------------------------------------------
.\" * set default formatting
.\" -----------------------------------------------------------------
@@ -70,18 +70,6 @@ defines the path to the SQLite3 zone file when using the sqlite datasource\&. Th
/usr/local/var/bind10\-devel/zone\&.sqlite3\&.
.PP
-\fIlisten_on\fR
-is a list of addresses and ports for
-\fBb10\-auth\fR
-to listen on\&. The list items are the
-\fIaddress\fR
-string and
-\fIport\fR
-number\&. By default,
-\fBb10\-auth\fR
-listens on port 53 on the IPv6 (::) and IPv4 (0\&.0\&.0\&.0) wildcard addresses\&.
-.PP
-
\fIdatasources\fR
configures data sources\&. The list items include:
\fItype\fR
@@ -114,6 +102,18 @@ In this development version, currently this is only used for the memory data sou
.RE
.PP
+\fIlisten_on\fR
+is a list of addresses and ports for
+\fBb10\-auth\fR
+to listen on\&. The list items are the
+\fIaddress\fR
+string and
+\fIport\fR
+number\&. By default,
+\fBb10\-auth\fR
+listens on port 53 on the IPv6 (::) and IPv4 (0\&.0\&.0\&.0) wildcard addresses\&.
+.PP
+
\fIstatistics\-interval\fR
is the timer interval in seconds for
\fBb10\-auth\fR
@@ -164,6 +164,25 @@ immediately\&.
\fBshutdown\fR
exits
\fBb10\-auth\fR\&. (Note that the BIND 10 boss process will restart this service\&.)
+.SH "STATISTICS DATA"
+.PP
+The statistics data collected by the
+\fBb10\-stats\fR
+daemon include:
+.PP
+auth\&.queries\&.tcp
+.RS 4
+Total count of queries received by the
+\fBb10\-auth\fR
+server over TCP since startup\&.
+.RE
+.PP
+auth\&.queries\&.udp
+.RS 4
+Total count of queries received by the
+\fBb10\-auth\fR
+server over UDP since startup\&.
+.RE
.SH "FILES"
.PP
diff --git a/src/bin/auth/b10-auth.xml b/src/bin/auth/b10-auth.xml
index 2b53394..636f437 100644
--- a/src/bin/auth/b10-auth.xml
+++ b/src/bin/auth/b10-auth.xml
@@ -20,7 +20,7 @@
<refentry>
<refentryinfo>
- <date>March 8, 2011</date>
+ <date>August 11, 2011</date>
</refentryinfo>
<refmeta>
@@ -132,15 +132,6 @@
</para>
<para>
- <varname>listen_on</varname> is a list of addresses and ports for
- <command>b10-auth</command> to listen on.
- The list items are the <varname>address</varname> string
- and <varname>port</varname> number.
- By default, <command>b10-auth</command> listens on port 53
- on the IPv6 (::) and IPv4 (0.0.0.0) wildcard addresses.
- </para>
-
- <para>
<varname>datasources</varname> configures data sources.
The list items include:
<varname>type</varname> to optionally choose the data source type
@@ -165,6 +156,15 @@
</para>
<para>
+ <varname>listen_on</varname> is a list of addresses and ports for
+ <command>b10-auth</command> to listen on.
+ The list items are the <varname>address</varname> string
+ and <varname>port</varname> number.
+ By default, <command>b10-auth</command> listens on port 53
+ on the IPv6 (::) and IPv4 (0.0.0.0) wildcard addresses.
+ </para>
+
+ <para>
<varname>statistics-interval</varname> is the timer interval
in seconds for <command>b10-auth</command> to share its
statistics information to
@@ -209,6 +209,34 @@
</refsect1>
<refsect1>
+ <title>STATISTICS DATA</title>
+
+ <para>
+ The statistics data collected by the <command>b10-stats</command>
+ daemon include:
+ </para>
+
+ <variablelist>
+
+ <varlistentry>
+ <term>auth.queries.tcp</term>
+ <listitem><simpara>Total count of queries received by the
+ <command>b10-auth</command> server over TCP since startup.
+ </simpara></listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term>auth.queries.udp</term>
+ <listitem><simpara>Total count of queries received by the
+ <command>b10-auth</command> server over UDP since startup.
+ </simpara></listitem>
+ </varlistentry>
+
+ </variablelist>
+
+ </refsect1>
+
+ <refsect1>
<title>FILES</title>
<para>
<filename>/usr/local/var/bind10-devel/zone.sqlite3</filename>
diff --git a/src/bin/auth/query.cc b/src/bin/auth/query.cc
index 05bcd89..3fe03c8 100644
--- a/src/bin/auth/query.cc
+++ b/src/bin/auth/query.cc
@@ -31,7 +31,7 @@ namespace isc {
namespace auth {
void
-Query::getAdditional(const ZoneFinder& zone, const RRset& rrset) const {
+Query::getAdditional(ZoneFinder& zone, const RRset& rrset) const {
RdataIteratorPtr rdata_iterator(rrset.getRdataIterator());
for (; !rdata_iterator->isLast(); rdata_iterator->next()) {
const Rdata& rdata(rdata_iterator->getCurrent());
@@ -47,7 +47,7 @@ Query::getAdditional(const ZoneFinder& zone, const RRset& rrset) const {
}
void
-Query::findAddrs(const ZoneFinder& zone, const Name& qname,
+Query::findAddrs(ZoneFinder& zone, const Name& qname,
const ZoneFinder::FindOptions options) const
{
// Out of zone name
@@ -86,7 +86,7 @@ Query::findAddrs(const ZoneFinder& zone, const Name& qname,
}
void
-Query::putSOA(const ZoneFinder& zone) const {
+Query::putSOA(ZoneFinder& zone) const {
ZoneFinder::FindResult soa_result(zone.find(zone.getOrigin(),
RRType::SOA()));
if (soa_result.code != ZoneFinder::SUCCESS) {
@@ -104,7 +104,7 @@ Query::putSOA(const ZoneFinder& zone) const {
}
void
-Query::getAuthAdditional(const ZoneFinder& zone) const {
+Query::getAuthAdditional(ZoneFinder& zone) const {
// Fill in authority and addtional sections.
ZoneFinder::FindResult ns_result = zone.find(zone.getOrigin(),
RRType::NS());
diff --git a/src/bin/auth/query.h b/src/bin/auth/query.h
index fa023fe..13523e8 100644
--- a/src/bin/auth/query.h
+++ b/src/bin/auth/query.h
@@ -69,7 +69,7 @@ private:
/// Adds a SOA of the zone into the authority zone of response_.
/// Can throw NoSOA.
///
- void putSOA(const isc::datasrc::ZoneFinder& zone) const;
+ void putSOA(isc::datasrc::ZoneFinder& zone) const;
/// \brief Look up additional data (i.e., address records for the names
/// included in NS or MX records).
@@ -85,7 +85,7 @@ private:
/// query is to be found.
/// \param rrset The RRset (i.e., NS or MX rrset) which require additional
/// processing.
- void getAdditional(const isc::datasrc::ZoneFinder& zone,
+ void getAdditional(isc::datasrc::ZoneFinder& zone,
const isc::dns::RRset& rrset) const;
/// \brief Find address records for a specified name.
@@ -104,7 +104,7 @@ private:
/// be found.
/// \param qname The name in rrset RDATA.
/// \param options The search options.
- void findAddrs(const isc::datasrc::ZoneFinder& zone,
+ void findAddrs(isc::datasrc::ZoneFinder& zone,
const isc::dns::Name& qname,
const isc::datasrc::ZoneFinder::FindOptions options
= isc::datasrc::ZoneFinder::FIND_DEFAULT) const;
@@ -127,7 +127,7 @@ private:
///
/// \param zone The \c ZoneFinder through which the NS and additional data
/// for the query are to be found.
- void getAuthAdditional(const isc::datasrc::ZoneFinder& zone) const;
+ void getAuthAdditional(isc::datasrc::ZoneFinder& zone) const;
public:
/// Constructor from query parameters.
diff --git a/src/bin/auth/tests/query_unittest.cc b/src/bin/auth/tests/query_unittest.cc
index 6a75856..68f0a1d 100644
--- a/src/bin/auth/tests/query_unittest.cc
+++ b/src/bin/auth/tests/query_unittest.cc
@@ -122,12 +122,12 @@ public:
masterLoad(zone_stream, origin_, rrclass_,
boost::bind(&MockZoneFinder::loadRRset, this, _1));
}
- virtual const isc::dns::Name& getOrigin() const { return (origin_); }
- virtual const isc::dns::RRClass& getClass() const { return (rrclass_); }
+ virtual isc::dns::Name getOrigin() const { return (origin_); }
+ virtual isc::dns::RRClass getClass() const { return (rrclass_); }
virtual FindResult find(const isc::dns::Name& name,
const isc::dns::RRType& type,
RRsetList* target = NULL,
- const FindOptions options = FIND_DEFAULT) const;
+ const FindOptions options = FIND_DEFAULT);
// If false is passed, it makes the zone broken as if it didn't have the
// SOA.
@@ -165,7 +165,7 @@ private:
ZoneFinder::FindResult
MockZoneFinder::find(const Name& name, const RRType& type,
- RRsetList* target, const FindOptions options) const
+ RRsetList* target, const FindOptions options)
{
// Emulating a broken zone: mandatory apex RRs are missing if specifically
// configured so (which are rare cases).
diff --git a/src/bin/bind10/bind10.8 b/src/bin/bind10/bind10.8
index d5ab905..1af4f14 100644
--- a/src/bin/bind10/bind10.8
+++ b/src/bin/bind10/bind10.8
@@ -2,12 +2,12 @@
.\" Title: bind10
.\" Author: [see the "AUTHORS" section]
.\" Generator: DocBook XSL Stylesheets v1.75.2 <http://docbook.sf.net/>
-.\" Date: March 31, 2011
+.\" Date: August 11, 2011
.\" Manual: BIND10
.\" Source: BIND10
.\" Language: English
.\"
-.TH "BIND10" "8" "March 31, 2011" "BIND10" "BIND10"
+.TH "BIND10" "8" "August 11, 2011" "BIND10" "BIND10"
.\" -----------------------------------------------------------------
.\" * set default formatting
.\" -----------------------------------------------------------------
@@ -107,6 +107,18 @@ Display more about what is going on for
\fBbind10\fR
and its child processes\&.
.RE
+.SH "STATISTICS DATA"
+.PP
+The statistics data collected by the
+\fBb10\-stats\fR
+daemon include:
+.PP
+bind10\&.boot_time
+.RS 4
+The date and time that the
+\fBbind10\fR
+process started\&. This is represented in ISO 8601 format\&.
+.RE
.SH "SEE ALSO"
.PP
diff --git a/src/bin/bind10/bind10.xml b/src/bin/bind10/bind10.xml
index 1128264..b101ba8 100644
--- a/src/bin/bind10/bind10.xml
+++ b/src/bin/bind10/bind10.xml
@@ -2,7 +2,7 @@
"http://www.oasis-open.org/docbook/xml/4.2/docbookx.dtd"
[<!ENTITY mdash "—">]>
<!--
- - Copyright (C) 2010 Internet Systems Consortium, Inc. ("ISC")
+ - Copyright (C) 2010-2011 Internet Systems Consortium, Inc. ("ISC")
-
- Permission to use, copy, modify, and/or distribute this software for any
- purpose with or without fee is hereby granted, provided that the above
@@ -20,7 +20,7 @@
<refentry>
<refentryinfo>
- <date>March 31, 2011</date>
+ <date>August 11, 2011</date>
</refentryinfo>
<refmeta>
@@ -217,6 +217,30 @@ The default is the basename of ARG 0.
<!--
TODO: configuration section
-->
+
+ <refsect1>
+ <title>STATISTICS DATA</title>
+
+ <para>
+ The statistics data collected by the <command>b10-stats</command>
+ daemon include:
+ </para>
+
+ <variablelist>
+
+ <varlistentry>
+ <term>bind10.boot_time</term>
+ <listitem><para>
+ The date and time that the <command>bind10</command>
+ process started.
+ This is represented in ISO 8601 format.
+ </para></listitem>
+ </varlistentry>
+
+ </variablelist>
+
+ </refsect1>
+
<!--
<refsect1>
<title>FILES</title>
diff --git a/src/bin/bind10/bob.spec b/src/bin/bind10/bob.spec
index 1184fd1..b4cfac6 100644
--- a/src/bin/bind10/bob.spec
+++ b/src/bin/bind10/bob.spec
@@ -37,6 +37,17 @@
"command_description": "List the running BIND 10 processes",
"command_args": []
}
+ ],
+ "statistics": [
+ {
+ "item_name": "boot_time",
+ "item_type": "string",
+ "item_optional": false,
+ "item_default": "1970-01-01T00:00:00Z",
+ "item_title": "Boot time",
+ "item_description": "A date time when bind10 process starts initially",
+ "item_format": "date-time"
+ }
]
}
}
diff --git a/src/bin/bind10/creatorapi.txt b/src/bin/bind10/creatorapi.txt
new file mode 100644
index 0000000..c23d907
--- /dev/null
+++ b/src/bin/bind10/creatorapi.txt
@@ -0,0 +1,123 @@
+Socket creator API
+==================
+
+This API sits between the Boss and other modules to allow them to request
+sockets. For simplicity, we will use the socket creator for all (even
+non-privileged) ports for now, but we should have some way to abstract this
+later.
+
+Goals
+-----
+* Be able to request a socket of any combination of IPv4/IPv6 and UDP/TCP,
+  bound to a given port and address (sockets that are not bound to anything
+  can be created without privileges and therefore are not requested from the
+  socket creator).
+* Allow providing the same socket to multiple modules (eg. multiple running
+  auth servers).
+* Allow releasing a socket (in case all modules using it give it up,
+  terminate or crash).
+* Allow restricting the sharing (don't allow a socket to be shared between
+  the auth and recursive servers, as packets would often reach the wrong
+  application; show an error instead).
+* Get the socket to the application.
+
+Transport of sockets
+--------------------
+It seems we are stuck with current msgq for a while and there's a chance the
+new replacement will not be able to send sockets inbound. So, we need another
+channel.
+
+The boss will create a unix-domain socket and listen on it. When something
+requests a socket over the command channel and the socket is created, some kind
+of token is returned to the application (which will represent the future
+socket). The application then connects to the unix-domain socket, sends the
+token over the connection (so Boss will know which socket to send there, in case
+multiple applications ask for sockets simultaneously) and Boss sends the socket
+in return.
+
+In theory, we could send the requests directly over the unix-domain
+socket, but it has two disadvantages:
+* The msgq handles serializing/deserializing of structured
+ information (like the parameters to be used), we would have to do it
+ manually on the socket.
+* We could place some kind of security in front of msgq (in case file
+ permissions are not enough, for example if they are not honored on
+ socket files, as indicated in the first paragraph of:
+ http://lkml.indiana.edu/hypermail/linux/kernel/0505.2/0008.html).
+ The socket would have to be secured separately. With the tokens,
+ there's some level of security already - someone not having the
+  token can't request a privileged socket.
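
As a concrete illustration of the pick-up step described above, the
application side could look roughly like this (a minimal sketch only; the
socket path, token framing and helper name are invented, and Python 3.9+'s
socket.recv_fds() is assumed):

    # Illustrative only -- not the real Boss protocol.
    import socket

    BOSS_SOCKET_PATH = "/tmp/bind10-socket-creator"   # hypothetical path

    def pick_up_socket(token):
        """Connect to the Boss's unix-domain socket, present the token and
        receive the corresponding file descriptor as SCM_RIGHTS data."""
        with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as chan:
            chan.connect(BOSS_SOCKET_PATH)
            chan.sendall(token)          # tell Boss which socket we want
            msg, fds, flags, addr = socket.recv_fds(chan, 1024, 1)
            if not fds:
                raise RuntimeError("Boss did not send a descriptor")
            return socket.socket(fileno=fds[0])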
+
+Caching of sockets
+------------------
+To allow sending the same socket to multiple applications, the Boss process will
+hold a cache. Each socket that is created and sent is kept open in Boss and
+preserved there as well. A reference count is kept with each of them.
+
+When another application asks for the same socket, it is simply sent from the
+cache instead of being created again by the creator.
+
+When an application gives up the socket willingly (by sending a message over the
+command channel), the reference count can be decreased without problems. But
+when the application terminates or crashes, we need to decrease it as well.
+There's a problem, since we don't know which command channel connection (eg.
+lname) belongs to which PID. Furthermore, the applications don't need to be
+started by boss.
+
+There are two possibilities:
+* Let the msgq send messages about disconnected clients (eg. group message to
+ some name). This one is better if we want to migrate to dbus, since dbus
+ already has this capability as well as sending the sockets inbound (at least it
+ seems so on unix) and we could get rid of the unix-domain socket completely.
+* Keep the unix-domain connections open forever. Boss can remember which socket
+ was sent to which connection and when the connection closes (because the
+ application crashed), it can drop all the references on the sockets. This
+ seems easier to implement.
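
To make the reference counting above concrete, a toy Boss-side cache could
look like the following (a sketch under assumed names only; the real cache
keying, creator interface and sharing-mode checks are not specified here):

    # Illustrative refcount cache, keyed by the requested (proto, address, port).
    # Sharing-mode restrictions from the text are omitted for brevity.
    class SocketCache:
        def __init__(self, creator):
            self._creator = creator      # whatever actually creates sockets
            self._cache = {}             # key -> [socket, refcount]

        def request(self, key):
            entry = self._cache.get(key)
            if entry is None:
                entry = [self._creator.create(key), 0]   # privileged creation
                self._cache[key] = entry
            entry[1] += 1
            return entry[0]

        def release(self, key):
            entry = self._cache[key]
            entry[1] -= 1
            if entry[1] <= 0:
                entry[0].close()         # last user gone, drop our copy
                del self._cache[key]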
+
+The commands
+------------
+* Command to release a socket. This one would have a single parameter, the
+  token used to get the socket. After this, boss would decrease its reference
+  count and, if it drops to zero, close its own copy of the socket. This should
+  be used when the module stops using the socket (and after it closes it). The
+ library could remember the file-descriptor to token mapping (for
+ common applications that don't request the same socket multiple
+ times in parallel).
+* Command to request a socket. It would have parameters to specify which socket
+ (IP address, address family, port) and how to allow sharing. Sharing would be
+ one of:
+ - None
+ - Same kind of application (however, it is not entirely clear what
+    this means; if it doesn't work out intuitively, we'll need to
+ define it somehow)
+ - Any kind of application
+ And a kind of application would be provided, to decide if the sharing is
+ possible (eg. if auth allows sharing with the same kind and something else
+  allows sharing with anything, the sharing is not possible, but two auths can).
+
+  It would return either an error (the socket can't be created or sharing is not
+ possible) or the token. Then there would be some time for the application to
+ pick up the requested socket.
+
+Examples
+--------
+We probably would have a library with blocking calls to request the
+sockets, so the code could look like:
+
+(socket_fd, token) = request_socket(address, port, 'UDP', SHARE_SAMENAME, 'test-application')
+sock = socket.fromfd(socket_fd)
+
+# Some sock.send and sock.recv stuff here
+
+sock.close()
+release_socket(socket_fd) # or release_socket(token)
+
+Known limitations
+-----------------
+Currently the socket creator doesn't support specifying any socket
+options. If it turns out there are any options that need to be set
+before bind(), we'll need to extend it (and extend the protocol as
+well). If we want to support them, we'll have to solve a possible
+conflict (what to do when two applications request the same socket and
+want to share it, but want different options).
+
+The current socket creator doesn't know about raw sockets, but if they are
+needed, it should be easy to add.
diff --git a/src/bin/resolver/b10-resolver.8 b/src/bin/resolver/b10-resolver.8
index 849092c..9161ec2 100644
--- a/src/bin/resolver/b10-resolver.8
+++ b/src/bin/resolver/b10-resolver.8
@@ -2,12 +2,12 @@
.\" Title: b10-resolver
.\" Author: [FIXME: author] [see http://docbook.sf.net/el/author]
.\" Generator: DocBook XSL Stylesheets v1.75.2 <http://docbook.sf.net/>
-.\" Date: February 17, 2011
+.\" Date: August 17, 2011
.\" Manual: BIND10
.\" Source: BIND10
.\" Language: English
.\"
-.TH "B10\-RESOLVER" "8" "February 17, 2011" "BIND10" "BIND10"
+.TH "B10\-RESOLVER" "8" "August 17, 2011" "BIND10" "BIND10"
.\" -----------------------------------------------------------------
.\" * set default formatting
.\" -----------------------------------------------------------------
@@ -54,7 +54,7 @@ must be either a valid numeric user ID or a valid user name\&. By default the da
.PP
\fB\-v\fR
.RS 4
-Enabled verbose mode\&. This enables diagnostic messages to STDERR\&.
+Enable verbose mode\&. This sets logging to the maximum debugging level\&.
.RE
.SH "CONFIGURATION AND COMMANDS"
.PP
@@ -77,6 +77,25 @@ string and
number\&. The defaults are address ::1 port 53 and address 127\&.0\&.0\&.1 port 53\&.
.PP
+
+
+
+
+
+\fIquery_acl\fR
+is a list of query access control rules\&. The list items are the
+\fIaction\fR
+string and the
+\fIfrom\fR
+or
+\fIkey\fR
+strings\&. The possible actions are ACCEPT, REJECT and DROP\&. The
+\fIfrom\fR
+is a remote (source) IPv4 or IPv6 address or special keyword\&. The
+\fIkey\fR
+is a TSIG key name\&. The default configuration accepts queries from 127\&.0\&.0\&.1 and ::1\&.
+.PP
+
\fIretries\fR
is the number of times to retry (resend query) after a query timeout (\fItimeout_query\fR)\&. The default is 3\&.
.PP
@@ -88,7 +107,7 @@ to use directly as root servers to start resolving\&. The list items are the
\fIaddress\fR
string and
\fIport\fR
-number\&. If empty, a hardcoded address for F\-root (192\&.5\&.5\&.241) is used\&.
+number\&. By default, a hardcoded address for l\&.root\-servers\&.net (199\&.7\&.83\&.42 or 2001:500:3::42) is used\&.
.PP
\fItimeout_client\fR
@@ -121,8 +140,7 @@ BIND 10 Guide\&.
.PP
The
\fBb10\-resolver\fR
-daemon was first coded in September 2010\&. The initial implementation only provided forwarding\&. Iteration was introduced in January 2011\&.
-
+daemon was first coded in September 2010\&. The initial implementation only provided forwarding\&. Iteration was introduced in January 2011\&. Caching was implemented in February 2011\&. Access control was introduced in June 2011\&.
.SH "COPYRIGHT"
.br
Copyright \(co 2010 Internet Systems Consortium, Inc. ("ISC")
diff --git a/src/bin/resolver/b10-resolver.xml b/src/bin/resolver/b10-resolver.xml
index bdf4f8a..75cced7 100644
--- a/src/bin/resolver/b10-resolver.xml
+++ b/src/bin/resolver/b10-resolver.xml
@@ -20,7 +20,7 @@
<refentry>
<refentryinfo>
- <date>February 17, 2011</date>
+ <date>August 17, 2011</date>
</refentryinfo>
<refmeta>
@@ -99,11 +99,14 @@
</listitem>
</varlistentry>
+<!-- TODO: this needs to be fixed as -v on command line
+should imply stdout or stderr output also -->
+<!-- TODO: can this -v be overidden by configuration or bindctl? -->
<varlistentry>
<term><option>-v</option></term>
<listitem><para>
- Enabled verbose mode. This enables diagnostic messages to
- STDERR.
+ Enable verbose mode.
+ This sets logging to the maximum debugging level.
</para></listitem>
</varlistentry>
@@ -147,6 +150,22 @@ once that is merged you can for instance do 'config add Resolver/forward_address
</para>
<para>
+<!-- TODO: need more explanation or point to guide. -->
+<!-- TODO: what about a netmask or cidr? -->
+<!-- TODO: document "key" -->
+<!-- TODO: where are the TSIG keys defined? -->
+<!-- TODO: key and from are mutually exclusive? what if both defined? -->
+ <varname>query_acl</varname> is a list of query access control
+ rules. The list items are the <varname>action</varname> string
+ and the <varname>from</varname> or <varname>key</varname> strings.
+ The possible actions are ACCEPT, REJECT and DROP.
+ The <varname>from</varname> is a remote (source) IPv4 or IPv6
+ address or special keyword.
+ The <varname>key</varname> is a TSIG key name.
+ The default configuration accepts queries from 127.0.0.1 and ::1.
+ </para>
+
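For example, a rule accepting queries from one more address could be added
with bindctl along these lines (a hypothetical session following the same
"config add Resolver/..." pattern noted above; the list index and address
are placeholders):

    > config add Resolver/query_acl
    > config set Resolver/query_acl[2]/action "ACCEPT"
    > config set Resolver/query_acl[2]/from "192.0.2.1"
    > config commit
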
+ <para>
<varname>retries</varname> is the number of times to retry
(resend query) after a query timeout
(<varname>timeout_query</varname>).
@@ -159,8 +178,10 @@ once that is merged you can for instance do 'config add Resolver/forward_address
root servers to start resolving.
The list items are the <varname>address</varname> string
and <varname>port</varname> number.
- If empty, a hardcoded address for F-root (192.5.5.241) is used.
+ By default, a hardcoded address for l.root-servers.net
+ (199.7.83.42 or 2001:500:3::42) is used.
</para>
+<!-- TODO: this is broken, see ticket #1184 -->
<para>
<varname>timeout_client</varname> is the number of milliseconds
@@ -234,7 +255,8 @@ once that is merged you can for instance do 'config add Resolver/forward_address
The <command>b10-resolver</command> daemon was first coded in
September 2010. The initial implementation only provided
forwarding. Iteration was introduced in January 2011.
-<!-- TODO: document when caching was added -->
+ Caching was implemented in February 2011.
+ Access control was introduced in June 2011.
<!-- TODO: document when validation was added -->
</para>
</refsect1>
diff --git a/src/bin/stats/b10-stats.8 b/src/bin/stats/b10-stats.8
index ebc9201..98b109b 100644
--- a/src/bin/stats/b10-stats.8
+++ b/src/bin/stats/b10-stats.8
@@ -1,22 +1,13 @@
'\" t
.\" Title: b10-stats
.\" Author: [FIXME: author] [see http://docbook.sf.net/el/author]
-.\" Generator: DocBook XSL Stylesheets v1.76.1 <http://docbook.sf.net/>
-.\" Date: Oct 15, 2010
+.\" Generator: DocBook XSL Stylesheets v1.75.2 <http://docbook.sf.net/>
+.\" Date: August 11, 2011
.\" Manual: BIND10
.\" Source: BIND10
.\" Language: English
.\"
-.TH "B10\-STATS" "8" "Oct 15, 2010" "BIND10" "BIND10"
-.\" -----------------------------------------------------------------
-.\" * Define some portability stuff
-.\" -----------------------------------------------------------------
-.\" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-.\" http://bugs.debian.org/507673
-.\" http://lists.gnu.org/archive/html/groff/2009-02/msg00013.html
-.\" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-.ie \n(.g .ds Aq \(aq
-.el .ds Aq '
+.TH "B10\-STATS" "8" "August 11, 2011" "BIND10" "BIND10"
.\" -----------------------------------------------------------------
.\" * set default formatting
.\" -----------------------------------------------------------------
@@ -47,7 +38,7 @@ and so on\&. It waits for coming data from other modules, then other modules sen
\fBb10\-stats\fR
invokes an internal command for
\fBbind10\fR
-after its initial starting because it\*(Aqs sure to collect statistics data from
+after its initial starting because it\'s sure to collect statistics data from
\fBbind10\fR\&.
.SH "OPTIONS"
.PP
@@ -59,6 +50,84 @@ This
\fBb10\-stats\fR
switches to verbose mode\&. It sends verbose messages to STDOUT\&.
.RE
+.SH "CONFIGURATION AND COMMANDS"
+.PP
+The
+\fBb10\-stats\fR
+command does not have any configurable settings\&.
+.PP
+The configuration commands are:
+.PP
+
+
+\fBremove\fR
+removes the named statistics name and data\&.
+.PP
+
+
+\fBreset\fR
+will reset all statistics data to default values except for constant names\&. This may re\-add previously removed statistics names\&.
+.PP
+
+\fBset\fR
+.PP
+
+\fBshow\fR
+will send the statistics data in JSON format\&. By default, it outputs all the statistics data it has collected\&. An optional item name may be specified to receive individual output\&.
+.PP
+
+\fBshutdown\fR
+will shutdown the
+\fBb10\-stats\fR
+process\&. (Note that the
+\fBbind10\fR
+parent may restart it\&.)
+.PP
+
+\fBstatus\fR
+simply indicates that the daemon is running\&.
+.SH "STATISTICS DATA"
+.PP
+The
+\fBb10\-stats\fR
+daemon contains these statistics:
+.PP
+report_time
+.RS 4
+The latest report date and time in ISO 8601 format\&.
+.RE
+.PP
+stats\&.boot_time
+.RS 4
+The date and time when this daemon was started in ISO 8601 format\&. This is a constant which can\'t be reset except by restarting
+\fBb10\-stats\fR\&.
+.RE
+.PP
+stats\&.last_update_time
+.RS 4
+The date and time (in ISO 8601 format) when this daemon last received data from another component\&.
+.RE
+.PP
+stats\&.lname
+.RS 4
+This is the name used for the
+\fBb10\-msgq\fR
+command\-control channel\&. (This is a constant which can\'t be reset except by restarting
+\fBb10\-stats\fR\&.)
+.RE
+.PP
+stats\&.start_time
+.RS 4
+This is the date and time (in ISO 8601 format) when this daemon started collecting data\&.
+.RE
+.PP
+stats\&.timestamp
+.RS 4
+The current date and time represented in seconds since the UNIX epoch (1970\-01\-01T00:00:00Z) with precision (delimited with a period) up to one hundred thousandth of a second\&.
+.RE
+.PP
+See the other manual pages for explanations of the statistics that are tracked by
+\fBb10\-stats\fR\&.
.SH "FILES"
.PP
/usr/local/share/bind10\-devel/stats\&.spec
@@ -82,7 +151,7 @@ BIND 10 Guide\&.
.PP
The
\fBb10\-stats\fR
-daemon was initially designed and implemented by Naoki Kambe of JPRS in Oct 2010\&.
+daemon was initially designed and implemented by Naoki Kambe of JPRS in October 2010\&.
.SH "COPYRIGHT"
.br
Copyright \(co 2010 Internet Systems Consortium, Inc. ("ISC")
diff --git a/src/bin/stats/b10-stats.xml b/src/bin/stats/b10-stats.xml
index 19f6f46..9709175 100644
--- a/src/bin/stats/b10-stats.xml
+++ b/src/bin/stats/b10-stats.xml
@@ -20,7 +20,7 @@
<refentry>
<refentryinfo>
- <date>Oct 15, 2010</date>
+ <date>August 11, 2011</date>
</refentryinfo>
<refmeta>
@@ -67,6 +67,7 @@
it. <command>b10-stats</command> invokes an internal command
for <command>bind10</command> after its initial starting because it's
sure to collect statistics data from <command>bind10</command>.
+<!-- TODO: reword that last sentence? -->
</para>
</refsect1>
@@ -87,6 +88,123 @@
</refsect1>
<refsect1>
+ <title>CONFIGURATION AND COMMANDS</title>
+
+ <para>
+ The <command>b10-stats</command> command does not have any
+ configurable settings.
+ </para>
+
+<!-- TODO: formating -->
+ <para>
+ The configuration commands are:
+ </para>
+
+ <para>
+<!-- TODO: remove is removed in trac930 -->
+ <command>remove</command> removes the named statistics name and data.
+ </para>
+
+ <para>
+<!-- TODO: reset is removed in trac930 -->
+ <command>reset</command> will reset all statistics data to
+ default values except for constant names.
+ This may re-add previously removed statistics names.
+ </para>
+
+ <para>
+ <command>set</command>
+<!-- TODO: document this -->
+ </para>
+
+ <para>
+ <command>show</command> will send the statistics data
+ in JSON format.
+ By default, it outputs all the statistics data it has collected.
+ An optional item name may be specified to receive individual output.
+ </para>
+
+<!-- TODO: document showschema -->
+
+ <para>
+ <command>shutdown</command> will shutdown the
+ <command>b10-stats</command> process.
+ (Note that the <command>bind10</command> parent may restart it.)
+ </para>
+
+ <para>
+ <command>status</command> simply indicates that the daemon is
+ running.
+ </para>
+
+ </refsect1>
+
+ <refsect1>
+ <title>STATISTICS DATA</title>
+
+ <para>
+ The <command>b10-stats</command> daemon contains these statistics:
+ </para>
+
+ <variablelist>
+
+ <varlistentry>
+ <term>report_time</term>
+<!-- TODO: why not named stats.report_time? -->
+ <listitem><simpara>The latest report date and time in
+ ISO 8601 format.</simpara></listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term>stats.boot_time</term>
+ <listitem><simpara>The date and time when this daemon was
+ started in ISO 8601 format.
+ This is a constant which can't be reset except by restarting
+ <command>b10-stats</command>.
+ </simpara></listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term>stats.last_update_time</term>
+ <listitem><simpara>The date and time (in ISO 8601 format)
+ when this daemon last received data from another component.
+ </simpara></listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term>stats.lname</term>
+ <listitem><simpara>This is the name used for the
+ <command>b10-msgq</command> command-control channel.
+ (This is a constant which can't be reset except by restarting
+ <command>b10-stats</command>.)
+ </simpara></listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term>stats.start_time</term>
+ <listitem><simpara>This is the date and time (in ISO 8601 format)
+ when this daemon started collecting data.
+ </simpara></listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term>stats.timestamp</term>
+ <listitem><simpara>The current date and time represented in
+      seconds since the UNIX epoch (1970-01-01T00:00:00Z) with
+      precision (delimited with a period) up to
+      one hundred thousandth of a second.</simpara></listitem>
+ </varlistentry>
+
+ </variablelist>
+
+ <para>
+    See the other manual pages for explanations of the statistics
+    that are tracked by <command>b10-stats</command>.
+ </para>
+
+ </refsect1>
+
+ <refsect1>
<title>FILES</title>
<para><filename>/usr/local/share/bind10-devel/stats.spec</filename>
<!--TODO: The filename should be computed from prefix-->
@@ -126,7 +244,7 @@
<title>HISTORY</title>
<para>
The <command>b10-stats</command> daemon was initially designed
- and implemented by Naoki Kambe of JPRS in Oct 2010.
+ and implemented by Naoki Kambe of JPRS in October 2010.
</para>
</refsect1>
</refentry><!--
diff --git a/src/bin/stats/stats-schema.spec b/src/bin/stats/stats-schema.spec
index 37e9c1a..5252865 100644
--- a/src/bin/stats/stats-schema.spec
+++ b/src/bin/stats/stats-schema.spec
@@ -54,8 +54,7 @@
"item_optional": false,
"item_default": 0.0,
"item_title": "stats.Timestamp",
- "item_description": "A current time stamp since epoch time (1970-01-01T00:00:00Z)",
- "item_format": "second"
+ "item_description": "A current time stamp since epoch time (1970-01-01T00:00:00Z)"
},
{
"item_name": "stats.lname",
diff --git a/src/bin/stats/stats.spec b/src/bin/stats/stats.spec
index 25f6b54..635eb48 100644
--- a/src/bin/stats/stats.spec
+++ b/src/bin/stats/stats.spec
@@ -56,6 +56,51 @@
"command_description": "Shut down the stats module",
"command_args": []
}
+ ],
+ "statistics": [
+ {
+ "item_name": "report_time",
+ "item_type": "string",
+ "item_optional": false,
+ "item_default": "1970-01-01T00:00:00Z",
+ "item_title": "Report time",
+ "item_description": "A date time when stats module reports",
+ "item_format": "date-time"
+ },
+ {
+ "item_name": "boot_time",
+ "item_type": "string",
+ "item_optional": false,
+ "item_default": "1970-01-01T00:00:00Z",
+ "item_title": "Boot time",
+ "item_description": "A date time when the stats module starts initially or when the stats module restarts",
+ "item_format": "date-time"
+ },
+ {
+ "item_name": "last_update_time",
+ "item_type": "string",
+ "item_optional": false,
+ "item_default": "1970-01-01T00:00:00Z",
+ "item_title": "Last update time",
+ "item_description": "The latest date time when the stats module receives from other modules like auth server or boss process and so on",
+ "item_format": "date-time"
+ },
+ {
+ "item_name": "timestamp",
+ "item_type": "real",
+ "item_optional": false,
+ "item_default": 0.0,
+ "item_title": "Timestamp",
+ "item_description": "A current time stamp since epoch time (1970-01-01T00:00:00Z)"
+ },
+ {
+ "item_name": "lname",
+ "item_type": "string",
+ "item_optional": false,
+ "item_default": "",
+ "item_title": "Local Name",
+ "item_description": "A localname of stats module given via CC protocol"
+ }
]
}
}
diff --git a/src/bin/stats/tests/isc/config/ccsession.py b/src/bin/stats/tests/isc/config/ccsession.py
index a4e9c37..50f7c1b 100644
--- a/src/bin/stats/tests/isc/config/ccsession.py
+++ b/src/bin/stats/tests/isc/config/ccsession.py
@@ -23,6 +23,7 @@ external module.
import json
import os
+import time
from isc.cc.session import Session
COMMAND_CONFIG_UPDATE = "config_update"
@@ -72,6 +73,9 @@ class ModuleSpecError(Exception):
class ModuleSpec:
def __init__(self, module_spec, check = True):
+        # check only config_data for testing
+ if check and "config_data" in module_spec:
+ _check_config_spec(module_spec["config_data"])
self._module_spec = module_spec
def get_config_spec(self):
@@ -83,6 +87,91 @@ class ModuleSpec:
def get_module_name(self):
return self._module_spec['module_name']
+def _check_config_spec(config_data):
+ # config data is a list of items represented by dicts that contain
+ # things like "item_name", depending on the type they can have
+ # specific subitems
+ """Checks a list that contains the configuration part of the
+ specification. Raises a ModuleSpecError if there is a
+ problem."""
+ if type(config_data) != list:
+ raise ModuleSpecError("config_data is of type " + str(type(config_data)) + ", not a list of items")
+ for config_item in config_data:
+ _check_item_spec(config_item)
+
+def _check_item_spec(config_item):
+ """Checks the dict that defines one config item
+ (i.e. containing "item_name", "item_type", etc.
+ Raises a ModuleSpecError if there is an error"""
+ if type(config_item) != dict:
+ raise ModuleSpecError("item spec not a dict")
+ if "item_name" not in config_item:
+ raise ModuleSpecError("no item_name in config item")
+ if type(config_item["item_name"]) != str:
+ raise ModuleSpecError("item_name is not a string: " + str(config_item["item_name"]))
+ item_name = config_item["item_name"]
+ if "item_type" not in config_item:
+ raise ModuleSpecError("no item_type in config item")
+ item_type = config_item["item_type"]
+ if type(item_type) != str:
+ raise ModuleSpecError("item_type in " + item_name + " is not a string: " + str(type(item_type)))
+ if item_type not in ["integer", "real", "boolean", "string", "list", "map", "any"]:
+ raise ModuleSpecError("unknown item_type in " + item_name + ": " + item_type)
+ if "item_optional" in config_item:
+ if type(config_item["item_optional"]) != bool:
+ raise ModuleSpecError("item_default in " + item_name + " is not a boolean")
+ if not config_item["item_optional"] and "item_default" not in config_item:
+ raise ModuleSpecError("no default value for non-optional item " + item_name)
+ else:
+ raise ModuleSpecError("item_optional not in item " + item_name)
+ if "item_default" in config_item:
+ item_default = config_item["item_default"]
+ if (item_type == "integer" and type(item_default) != int) or \
+ (item_type == "real" and type(item_default) != float) or \
+ (item_type == "boolean" and type(item_default) != bool) or \
+ (item_type == "string" and type(item_default) != str) or \
+ (item_type == "list" and type(item_default) != list) or \
+ (item_type == "map" and type(item_default) != dict):
+ raise ModuleSpecError("Wrong type for item_default in " + item_name)
+ # TODO: once we have check_type, run the item default through that with the list|map_item_spec
+ if item_type == "list":
+ if "list_item_spec" not in config_item:
+ raise ModuleSpecError("no list_item_spec in list item " + item_name)
+ if type(config_item["list_item_spec"]) != dict:
+ raise ModuleSpecError("list_item_spec in " + item_name + " is not a dict")
+ _check_item_spec(config_item["list_item_spec"])
+ if item_type == "map":
+ if "map_item_spec" not in config_item:
+ raise ModuleSpecError("no map_item_sepc in map item " + item_name)
+ if type(config_item["map_item_spec"]) != list:
+ raise ModuleSpecError("map_item_spec in " + item_name + " is not a list")
+ for map_item in config_item["map_item_spec"]:
+ if type(map_item) != dict:
+ raise ModuleSpecError("map_item_spec element is not a dict")
+ _check_item_spec(map_item)
+ if 'item_format' in config_item and 'item_default' in config_item:
+ item_format = config_item["item_format"]
+ item_default = config_item["item_default"]
+ if not _check_format(item_default, item_format):
+ raise ModuleSpecError(
+ "Wrong format for " + str(item_default) + " in " + str(item_name))
+
+def _check_format(value, format_name):
+ """Check if specified value and format are correct. Return True if
+    it is correct."""
+    # TODO: other format types should be added if necessary
+ time_formats = { 'date-time' : "%Y-%m-%dT%H:%M:%SZ",
+ 'date' : "%Y-%m-%d",
+ 'time' : "%H:%M:%S" }
+ for fmt in time_formats:
+ if format_name == fmt:
+ try:
+ time.strptime(value, time_formats[fmt])
+ return True
+ except (ValueError, TypeError):
+ break
+ return False
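
A few illustrative calls (not part of the mock itself) show what the helper
above accepts and rejects:

    _check_format("1970-01-01T00:00:00Z", "date-time")   # True
    _check_format("2011-08-11", "date")                  # True
    _check_format("not-a-time", "time")                  # False
    _check_format("1970-01-01", "date-time")             # False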
+
class ModuleCCSessionError(Exception):
pass
diff --git a/src/bin/xfrin/b10-xfrin.8 b/src/bin/xfrin/b10-xfrin.8
index 3ea2293..7f73213 100644
--- a/src/bin/xfrin/b10-xfrin.8
+++ b/src/bin/xfrin/b10-xfrin.8
@@ -71,6 +71,9 @@ is a list of zones known to the
daemon\&. The list items are:
\fIname\fR
(the zone name),
+\fIclass\fR
+(defaults to
+\(lqIN\(rq),
\fImaster_addr\fR
(the zone master to transfer from),
\fImaster_port\fR
@@ -125,7 +128,7 @@ to define the class (defaults to
\fImaster\fR
to define the IP address of the authoritative server to transfer from, and
\fIport\fR
-to define the port number on the authoritative server (defaults to 53)\&. If the address or port is not specified, it will use the values previously defined in the
+to define the port number on the authoritative server (defaults to 53)\&. If the address or port is not specified, it will use the value previously defined in the
\fIzones\fR
configuration\&.
.PP
diff --git a/src/bin/xfrin/b10-xfrin.xml b/src/bin/xfrin/b10-xfrin.xml
index ea4c724..17840fe 100644
--- a/src/bin/xfrin/b10-xfrin.xml
+++ b/src/bin/xfrin/b10-xfrin.xml
@@ -103,6 +103,7 @@ in separate zonemgr process.
<command>b10-xfrin</command> daemon.
The list items are:
<varname>name</varname> (the zone name),
+ <varname>class</varname> (defaults to <quote>IN</quote>),
<varname>master_addr</varname> (the zone master to transfer from),
<varname>master_port</varname> (defaults to 53), and
<varname>tsig_key</varname> (optional TSIG key to use).
@@ -168,7 +169,7 @@ in separate zonemgr process.
and <varname>port</varname> to define the port number on the
authoritative server (defaults to 53).
If the address or port is not specified, it will use the
- values previously defined in the <varname>zones</varname>
+ value previously defined in the <varname>zones</varname>
configuration.
</para>
<!-- TODO: later hostname for master? -->
diff --git a/src/bin/xfrout/b10-xfrout.xml b/src/bin/xfrout/b10-xfrout.xml
index ad71fe2..9889b80 100644
--- a/src/bin/xfrout/b10-xfrout.xml
+++ b/src/bin/xfrout/b10-xfrout.xml
@@ -134,6 +134,14 @@
data storage types.
</simpara></note>
+
+<!--
+
+tsig_key_ring list of
+tsig_key string
+
+-->
+
<!-- TODO: formating -->
<para>
The configuration commands are:
diff --git a/src/lib/cache/cache_messages.mes b/src/lib/cache/cache_messages.mes
index 2a68cc2..19102ae 100644
--- a/src/lib/cache/cache_messages.mes
+++ b/src/lib/cache/cache_messages.mes
@@ -124,14 +124,14 @@ the message will not be cached.
Debug message. The requested data was found in the RRset cache. However, it is
expired, so the cache removed it and is going to pretend nothing was found.
-% CACHE_RRSET_INIT initializing RRset cache for %2 RRsets of class %1
+% CACHE_RRSET_INIT initializing RRset cache for %1 RRsets of class %2
Debug message. The RRset cache to hold at most this many RRsets for the given
class is being created.
% CACHE_RRSET_LOOKUP looking up %1/%2/%3 in RRset cache
Debug message. The resolver is trying to look up data in the RRset cache.
-% CACHE_RRSET_NOT_FOUND no RRset found for %1/%2/%3
+% CACHE_RRSET_NOT_FOUND no RRset found for %1/%2/%3 in cache
Debug message which can follow CACHE_RRSET_LOOKUP. This means the data is not
in the cache.
diff --git a/src/lib/cc/session.cc b/src/lib/cc/session.cc
index 97d5cf1..e0e24cf 100644
--- a/src/lib/cc/session.cc
+++ b/src/lib/cc/session.cc
@@ -119,7 +119,7 @@ private:
void
SessionImpl::establish(const char& socket_file) {
try {
- LOG_DEBUG(logger, DBG_TRACE_BASIC, CC_ESTABLISH).arg(socket_file);
+ LOG_DEBUG(logger, DBG_TRACE_BASIC, CC_ESTABLISH).arg(&socket_file);
socket_.connect(asio::local::stream_protocol::endpoint(&socket_file),
error_);
LOG_DEBUG(logger, DBG_TRACE_BASIC, CC_ESTABLISHED);
diff --git a/src/lib/config/module_spec.cc b/src/lib/config/module_spec.cc
index 306c795..bebe695 100644
--- a/src/lib/config/module_spec.cc
+++ b/src/lib/config/module_spec.cc
@@ -1,4 +1,4 @@
-// Copyright (C) 2010 Internet Systems Consortium.
+// Copyright (C) 2010, 2011 Internet Systems Consortium.
//
// Permission to use, copy, modify, and distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
@@ -87,6 +87,61 @@ check_config_item_list(ConstElementPtr spec) {
}
}
+// checks whether the given element is a valid statistics specification
+// returns false if the specification is bad
+bool
+check_format(ConstElementPtr value, ConstElementPtr format_name) {
+ typedef std::map<std::string, std::string> format_types;
+ format_types time_formats;
+    // TODO: other format types should be added if necessary
+ time_formats.insert(
+ format_types::value_type("date-time", "%Y-%m-%dT%H:%M:%SZ") );
+ time_formats.insert(
+ format_types::value_type("date", "%Y-%m-%d") );
+ time_formats.insert(
+ format_types::value_type("time", "%H:%M:%S") );
+ BOOST_FOREACH (const format_types::value_type& f, time_formats) {
+ if (format_name->stringValue() == f.first) {
+ struct tm tm;
+ std::vector<char> buf(32);
+ memset(&tm, 0, sizeof(tm));
+ // reverse check
+ return (strptime(value->stringValue().c_str(),
+ f.second.c_str(), &tm) != NULL
+ && strftime(&buf[0], buf.size(),
+ f.second.c_str(), &tm) != 0
+ && strncmp(value->stringValue().c_str(),
+ &buf[0], buf.size()) == 0);
+ }
+ }
+ return (false);
+}
+
+void check_statistics_item_list(ConstElementPtr spec);
+
+void
+check_statistics_item_list(ConstElementPtr spec) {
+ if (spec->getType() != Element::list) {
+ throw ModuleSpecError("statistics is not a list of elements");
+ }
+ BOOST_FOREACH(ConstElementPtr item, spec->listValue()) {
+ check_config_item(item);
+ // additional checks for statistics
+ check_leaf_item(item, "item_title", Element::string, true);
+ check_leaf_item(item, "item_description", Element::string, true);
+ check_leaf_item(item, "item_format", Element::string, false);
+ // checks name of item_format and validation of item_default
+ if (item->contains("item_format")
+ && item->contains("item_default")) {
+ if(!check_format(item->get("item_default"),
+ item->get("item_format"))) {
+ throw ModuleSpecError(
+ "item_default not valid type of item_format");
+ }
+ }
+ }
+}
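
For instance, a statistics entry like the first snippet below would pass
check_statistics_item_list(), while the second would be rejected with
"item_default not valid type of item_format" (illustrative entries in the same
.spec JSON format used elsewhere in this change; the descriptions are made up):

    { "item_name": "boot_time", "item_type": "string", "item_optional": false,
      "item_default": "1970-01-01T00:00:00Z", "item_title": "Boot time",
      "item_description": "when the process started", "item_format": "date-time" }

    { "item_name": "boot_time", "item_type": "string", "item_optional": false,
      "item_default": "not-a-time", "item_title": "Boot time",
      "item_description": "when the process started", "item_format": "date-time" }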
+
void
check_command(ConstElementPtr spec) {
check_leaf_item(spec, "command_name", Element::string, true);
@@ -116,6 +171,9 @@ check_data_specification(ConstElementPtr spec) {
if (spec->contains("commands")) {
check_command_list(spec->get("commands"));
}
+ if (spec->contains("statistics")) {
+ check_statistics_item_list(spec->get("statistics"));
+ }
}
// checks whether the given element is a valid module specification
@@ -165,6 +223,15 @@ ModuleSpec::getConfigSpec() const {
}
}
+ConstElementPtr
+ModuleSpec::getStatisticsSpec() const {
+ if (module_specification->contains("statistics")) {
+ return (module_specification->get("statistics"));
+ } else {
+ return (ElementPtr());
+ }
+}
+
const std::string
ModuleSpec::getModuleName() const {
return (module_specification->get("module_name")->stringValue());
@@ -186,6 +253,12 @@ ModuleSpec::validateConfig(ConstElementPtr data, const bool full) const {
}
bool
+ModuleSpec::validateStatistics(ConstElementPtr data, const bool full) const {
+ ConstElementPtr spec = module_specification->find("statistics");
+ return (validateSpecList(spec, data, full, ElementPtr()));
+}
+
+bool
ModuleSpec::validateCommand(const std::string& command,
ConstElementPtr args,
ElementPtr errors) const
@@ -223,6 +296,14 @@ ModuleSpec::validateConfig(ConstElementPtr data, const bool full,
return (validateSpecList(spec, data, full, errors));
}
+bool
+ModuleSpec::validateStatistics(ConstElementPtr data, const bool full,
+ ElementPtr errors) const
+{
+ ConstElementPtr spec = module_specification->find("statistics");
+ return (validateSpecList(spec, data, full, errors));
+}
+
ModuleSpec
moduleSpecFromFile(const std::string& file_name, const bool check)
throw(JSONError, ModuleSpecError)
@@ -343,6 +424,14 @@ ModuleSpec::validateItem(ConstElementPtr spec, ConstElementPtr data,
}
}
}
+ if (spec->contains("item_format")) {
+ if (!check_format(data, spec->get("item_format"))) {
+ if (errors) {
+ errors->add(Element::create("Format mismatch"));
+ }
+ return (false);
+ }
+ }
return (true);
}
diff --git a/src/lib/config/module_spec.h b/src/lib/config/module_spec.h
index ab6e273..ce3762f 100644
--- a/src/lib/config/module_spec.h
+++ b/src/lib/config/module_spec.h
@@ -1,4 +1,4 @@
-// Copyright (C) 2010 Internet Systems Consortium.
+// Copyright (C) 2010, 2011 Internet Systems Consortium.
//
// Permission to use, copy, modify, and distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
@@ -71,6 +71,12 @@ namespace isc { namespace config {
/// part of the specification
isc::data::ConstElementPtr getConfigSpec() const;
+ /// Returns the statistics part of the specification as an
+ /// ElementPtr
+ /// \return ElementPtr Shared pointer to the statistics
+ /// part of the specification
+ isc::data::ConstElementPtr getStatisticsSpec() const;
+
/// Returns the full module specification as an ElementPtr
/// \return ElementPtr Shared pointer to the specification
isc::data::ConstElementPtr getFullSpec() const {
@@ -95,6 +101,17 @@ namespace isc { namespace config {
bool validateConfig(isc::data::ConstElementPtr data,
const bool full = false) const;
+ // returns true if the given element conforms to this data
+ // statistics specification
+ /// Validates the given statistics data for this specification.
+ /// \param data The base \c Element of the data to check
+ /// \param full If true, all non-optional statistics parameters
+ /// must be specified.
+ /// \return true if the data conforms to the specification,
+ /// false otherwise.
+ bool validateStatistics(isc::data::ConstElementPtr data,
+ const bool full = false) const;
+
/// Validates the arguments for the given command
///
/// This checks the command and argument against the
@@ -142,6 +159,10 @@ namespace isc { namespace config {
bool validateConfig(isc::data::ConstElementPtr data, const bool full,
isc::data::ElementPtr errors) const;
+ /// errors must be of type ListElement
+ bool validateStatistics(isc::data::ConstElementPtr data, const bool full,
+ isc::data::ElementPtr errors) const;
+
private:
bool validateItem(isc::data::ConstElementPtr spec,
isc::data::ConstElementPtr data,
diff --git a/src/lib/config/tests/ccsession_unittests.cc b/src/lib/config/tests/ccsession_unittests.cc
index 5ea4f32..793fa30 100644
--- a/src/lib/config/tests/ccsession_unittests.cc
+++ b/src/lib/config/tests/ccsession_unittests.cc
@@ -184,7 +184,7 @@ TEST_F(CCSessionTest, session2) {
ConstElementPtr msg;
std::string group, to;
msg = session.getFirstMessage(group, to);
- EXPECT_EQ("{ \"command\": [ \"module_spec\", { \"commands\": [ { \"command_args\": [ { \"item_default\": \"\", \"item_name\": \"message\", \"item_optional\": false, \"item_type\": \"string\" } ], \"command_description\": \"Print the given message to stdout\", \"command_name\": \"print_message\" }, { \"command_args\": [ ], \"command_description\": \"Shut down BIND 10\", \"command_name\": \"shutdown\" } ], \"config_data\": [ { \"item_default\": 1, \"item_name\": \"item1\", \"item_optional\": false, \"item_type\": \"integer\" }, { \"item_default\": 1.1, \"item_name\": \"item2\", \"item_optional\": false, \"item_type\": \"real\" }, { \"item_default\": true, \"item_name\": \"item3\", \"item_optional\": false, \"item_type\": \"boolean\" }, { \"item_default\": \"test\", \"item_name\": \"item4\", \"item_optional\": false, \"item_type\": \"string\" }, { \"item_default\": [ \"a\", \"b\" ], \"item_name\": \"item5\", \"item_optional\": false, \"item_type\": \"list\", \"list_item_sp
ec\": { \"item_default\": \"\", \"item_name\": \"list_element\", \"item_optional\": false, \"item_type\": \"string\" } }, { \"item_default\": { }, \"item_name\": \"item6\", \"item_optional\": false, \"item_type\": \"map\", \"map_item_spec\": [ { \"item_default\": \"default\", \"item_name\": \"value1\", \"item_optional\": true, \"item_type\": \"string\" }, { \"item_name\": \"value2\", \"item_optional\": true, \"item_type\": \"integer\" } ] } ], \"module_name\": \"Spec2\" } ] }", msg->str());
+ EXPECT_EQ("{ \"command\": [ \"module_spec\", { \"commands\": [ { \"command_args\": [ { \"item_default\": \"\", \"item_name\": \"message\", \"item_optional\": false, \"item_type\": \"string\" } ], \"command_description\": \"Print the given message to stdout\", \"command_name\": \"print_message\" }, { \"command_args\": [ ], \"command_description\": \"Shut down BIND 10\", \"command_name\": \"shutdown\" } ], \"config_data\": [ { \"item_default\": 1, \"item_name\": \"item1\", \"item_optional\": false, \"item_type\": \"integer\" }, { \"item_default\": 1.1, \"item_name\": \"item2\", \"item_optional\": false, \"item_type\": \"real\" }, { \"item_default\": true, \"item_name\": \"item3\", \"item_optional\": false, \"item_type\": \"boolean\" }, { \"item_default\": \"test\", \"item_name\": \"item4\", \"item_optional\": false, \"item_type\": \"string\" }, { \"item_default\": [ \"a\", \"b\" ], \"item_name\": \"item5\", \"item_optional\": false, \"item_type\": \"list\", \"list_item_sp
ec\": { \"item_default\": \"\", \"item_name\": \"list_element\", \"item_optional\": false, \"item_type\": \"string\" } }, { \"item_default\": { }, \"item_name\": \"item6\", \"item_optional\": false, \"item_type\": \"map\", \"map_item_spec\": [ { \"item_default\": \"default\", \"item_name\": \"value1\", \"item_optional\": true, \"item_type\": \"string\" }, { \"item_name\": \"value2\", \"item_optional\": true, \"item_type\": \"integer\" } ] } ], \"module_name\": \"Spec2\", \"statistics\": [ { \"item_default\": \"1970-01-01T00:00:00Z\", \"item_description\": \"A dummy date time\", \"item_format\": \"date-time\", \"item_name\": \"dummy_time\", \"item_optional\": false, \"item_title\": \"Dummy Time\", \"item_type\": \"string\" } ] } ] }", msg->str());
EXPECT_EQ("ConfigManager", group);
EXPECT_EQ("*", to);
EXPECT_EQ(0, session.getMsgQueue()->size());
@@ -231,7 +231,7 @@ TEST_F(CCSessionTest, session3) {
ConstElementPtr msg;
std::string group, to;
msg = session.getFirstMessage(group, to);
- EXPECT_EQ("{ \"command\": [ \"module_spec\", { \"commands\": [ { \"command_args\": [ { \"item_default\": \"\", \"item_name\": \"message\", \"item_optional\": false, \"item_type\": \"string\" } ], \"command_description\": \"Print the given message to stdout\", \"command_name\": \"print_message\" }, { \"command_args\": [ ], \"command_description\": \"Shut down BIND 10\", \"command_name\": \"shutdown\" } ], \"config_data\": [ { \"item_default\": 1, \"item_name\": \"item1\", \"item_optional\": false, \"item_type\": \"integer\" }, { \"item_default\": 1.1, \"item_name\": \"item2\", \"item_optional\": false, \"item_type\": \"real\" }, { \"item_default\": true, \"item_name\": \"item3\", \"item_optional\": false, \"item_type\": \"boolean\" }, { \"item_default\": \"test\", \"item_name\": \"item4\", \"item_optional\": false, \"item_type\": \"string\" }, { \"item_default\": [ \"a\", \"b\" ], \"item_name\": \"item5\", \"item_optional\": false, \"item_type\": \"list\", \"list_item_sp
ec\": { \"item_default\": \"\", \"item_name\": \"list_element\", \"item_optional\": false, \"item_type\": \"string\" } }, { \"item_default\": { }, \"item_name\": \"item6\", \"item_optional\": false, \"item_type\": \"map\", \"map_item_spec\": [ { \"item_default\": \"default\", \"item_name\": \"value1\", \"item_optional\": true, \"item_type\": \"string\" }, { \"item_name\": \"value2\", \"item_optional\": true, \"item_type\": \"integer\" } ] } ], \"module_name\": \"Spec2\" } ] }", msg->str());
+ EXPECT_EQ("{ \"command\": [ \"module_spec\", { \"commands\": [ { \"command_args\": [ { \"item_default\": \"\", \"item_name\": \"message\", \"item_optional\": false, \"item_type\": \"string\" } ], \"command_description\": \"Print the given message to stdout\", \"command_name\": \"print_message\" }, { \"command_args\": [ ], \"command_description\": \"Shut down BIND 10\", \"command_name\": \"shutdown\" } ], \"config_data\": [ { \"item_default\": 1, \"item_name\": \"item1\", \"item_optional\": false, \"item_type\": \"integer\" }, { \"item_default\": 1.1, \"item_name\": \"item2\", \"item_optional\": false, \"item_type\": \"real\" }, { \"item_default\": true, \"item_name\": \"item3\", \"item_optional\": false, \"item_type\": \"boolean\" }, { \"item_default\": \"test\", \"item_name\": \"item4\", \"item_optional\": false, \"item_type\": \"string\" }, { \"item_default\": [ \"a\", \"b\" ], \"item_name\": \"item5\", \"item_optional\": false, \"item_type\": \"list\", \"list_item_sp
ec\": { \"item_default\": \"\", \"item_name\": \"list_element\", \"item_optional\": false, \"item_type\": \"string\" } }, { \"item_default\": { }, \"item_name\": \"item6\", \"item_optional\": false, \"item_type\": \"map\", \"map_item_spec\": [ { \"item_default\": \"default\", \"item_name\": \"value1\", \"item_optional\": true, \"item_type\": \"string\" }, { \"item_name\": \"value2\", \"item_optional\": true, \"item_type\": \"integer\" } ] } ], \"module_name\": \"Spec2\", \"statistics\": [ { \"item_default\": \"1970-01-01T00:00:00Z\", \"item_description\": \"A dummy date time\", \"item_format\": \"date-time\", \"item_name\": \"dummy_time\", \"item_optional\": false, \"item_title\": \"Dummy Time\", \"item_type\": \"string\" } ] } ] }", msg->str());
EXPECT_EQ("ConfigManager", group);
EXPECT_EQ("*", to);
EXPECT_EQ(1, session.getMsgQueue()->size());
diff --git a/src/lib/config/tests/module_spec_unittests.cc b/src/lib/config/tests/module_spec_unittests.cc
index d642af8..b2ca7b4 100644
--- a/src/lib/config/tests/module_spec_unittests.cc
+++ b/src/lib/config/tests/module_spec_unittests.cc
@@ -1,4 +1,4 @@
-// Copyright (C) 2009 Internet Systems Consortium, Inc. ("ISC")
+// Copyright (C) 2009, 2011 Internet Systems Consortium, Inc. ("ISC")
//
// Permission to use, copy, modify, and/or distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
@@ -18,6 +18,8 @@
#include <fstream>
+#include <boost/foreach.hpp>
+
#include <config/tests/data_def_unittests_config.h>
using namespace isc::data;
@@ -57,6 +59,7 @@ TEST(ModuleSpec, ReadingSpecfiles) {
dd = moduleSpecFromFile(specfile("spec2.spec"));
EXPECT_EQ("[ { \"command_args\": [ { \"item_default\": \"\", \"item_name\": \"message\", \"item_optional\": false, \"item_type\": \"string\" } ], \"command_description\": \"Print the given message to stdout\", \"command_name\": \"print_message\" }, { \"command_args\": [ ], \"command_description\": \"Shut down BIND 10\", \"command_name\": \"shutdown\" } ]", dd.getCommandsSpec()->str());
+ EXPECT_EQ("[ { \"item_default\": \"1970-01-01T00:00:00Z\", \"item_description\": \"A dummy date time\", \"item_format\": \"date-time\", \"item_name\": \"dummy_time\", \"item_optional\": false, \"item_title\": \"Dummy Time\", \"item_type\": \"string\" } ]", dd.getStatisticsSpec()->str());
EXPECT_EQ("Spec2", dd.getModuleName());
EXPECT_EQ("", dd.getModuleDescription());
@@ -64,6 +67,11 @@ TEST(ModuleSpec, ReadingSpecfiles) {
EXPECT_EQ("Spec25", dd.getModuleName());
EXPECT_EQ("Just an empty module", dd.getModuleDescription());
EXPECT_THROW(moduleSpecFromFile(specfile("spec26.spec")), ModuleSpecError);
+ EXPECT_THROW(moduleSpecFromFile(specfile("spec34.spec")), ModuleSpecError);
+ EXPECT_THROW(moduleSpecFromFile(specfile("spec35.spec")), ModuleSpecError);
+ EXPECT_THROW(moduleSpecFromFile(specfile("spec36.spec")), ModuleSpecError);
+ EXPECT_THROW(moduleSpecFromFile(specfile("spec37.spec")), ModuleSpecError);
+ EXPECT_THROW(moduleSpecFromFile(specfile("spec38.spec")), ModuleSpecError);
std::ifstream file;
file.open(specfile("spec1.spec").c_str());
@@ -71,6 +79,7 @@ TEST(ModuleSpec, ReadingSpecfiles) {
EXPECT_EQ(dd.getFullSpec()->get("module_name")
->stringValue(), "Spec1");
EXPECT_TRUE(isNull(dd.getCommandsSpec()));
+ EXPECT_TRUE(isNull(dd.getStatisticsSpec()));
std::ifstream file2;
file2.open(specfile("spec8.spec").c_str());
@@ -114,6 +123,12 @@ TEST(ModuleSpec, SpecfileConfigData) {
"commands is not a list of elements");
}
+TEST(ModuleSpec, SpecfileStatistics) {
+ moduleSpecError("spec36.spec", "item_default not valid type of item_format");
+ moduleSpecError("spec37.spec", "statistics is not a list of elements");
+ moduleSpecError("spec38.spec", "item_default not valid type of item_format");
+}
+
TEST(ModuleSpec, SpecfileCommands) {
moduleSpecError("spec17.spec",
"command_name missing in { \"command_args\": [ { \"item_default\": \"\", \"item_name\": \"message\", \"item_optional\": false, \"item_type\": \"string\" } ], \"command_description\": \"Print the given message to stdout\" }");
@@ -137,6 +152,17 @@ dataTest(const ModuleSpec& dd, const std::string& data_file_name) {
}
bool
+statisticsTest(const ModuleSpec& dd, const std::string& data_file_name) {
+ std::ifstream data_file;
+
+ data_file.open(specfile(data_file_name).c_str());
+ ConstElementPtr data = Element::fromJSON(data_file, data_file_name);
+ data_file.close();
+
+ return (dd.validateStatistics(data));
+}
+
+bool
dataTestWithErrors(const ModuleSpec& dd, const std::string& data_file_name,
ElementPtr errors)
{
@@ -149,6 +175,19 @@ dataTestWithErrors(const ModuleSpec& dd, const std::string& data_file_name,
return (dd.validateConfig(data, true, errors));
}
+bool
+statisticsTestWithErrors(const ModuleSpec& dd, const std::string& data_file_name,
+ ElementPtr errors)
+{
+ std::ifstream data_file;
+
+ data_file.open(specfile(data_file_name).c_str());
+ ConstElementPtr data = Element::fromJSON(data_file, data_file_name);
+ data_file.close();
+
+ return (dd.validateStatistics(data, true, errors));
+}
+
TEST(ModuleSpec, DataValidation) {
ModuleSpec dd = moduleSpecFromFile(specfile("spec22.spec"));
@@ -175,6 +214,17 @@ TEST(ModuleSpec, DataValidation) {
EXPECT_EQ("[ \"Unknown item value_does_not_exist\" ]", errors->str());
}
+TEST(ModuleSpec, StatisticsValidation) {
+ ModuleSpec dd = moduleSpecFromFile(specfile("spec33.spec"));
+
+ EXPECT_TRUE(statisticsTest(dd, "data33_1.data"));
+ EXPECT_FALSE(statisticsTest(dd, "data33_2.data"));
+
+ ElementPtr errors = Element::createList();
+ EXPECT_FALSE(statisticsTestWithErrors(dd, "data33_2.data", errors));
+ EXPECT_EQ("[ \"Format mismatch\", \"Format mismatch\", \"Format mismatch\" ]", errors->str());
+}
+
TEST(ModuleSpec, CommandValidation) {
ModuleSpec dd = moduleSpecFromFile(specfile("spec2.spec"));
ConstElementPtr arg = Element::fromJSON("{}");
@@ -220,3 +270,109 @@ TEST(ModuleSpec, NamedSetValidation) {
EXPECT_FALSE(dataTest(dd, "data32_2.data"));
EXPECT_FALSE(dataTest(dd, "data32_3.data"));
}
+
+TEST(ModuleSpec, CheckFormat) {
+
+ const std::string json_begin = "{ \"module_spec\": { \"module_name\": \"Foo\", \"statistics\": [ { \"item_name\": \"dummy_time\", \"item_type\": \"string\", \"item_optional\": true, \"item_title\": \"Dummy Time\", \"item_description\": \"A dummy date time\"";
+ const std::string json_end = " } ] } }";
+ std::string item_default;
+ std::string item_format;
+ std::vector<std::string> specs;
+ ConstElementPtr el;
+
+ specs.clear();
+ item_default = "\"item_default\": \"2011-05-27T19:42:57Z\",";
+ item_format = "\"item_format\": \"date-time\"";
+ specs.push_back("," + item_default + item_format);
+ item_default = "\"item_default\": \"2011-05-27\",";
+ item_format = "\"item_format\": \"date\"";
+ specs.push_back("," + item_default + item_format);
+ item_default = "\"item_default\": \"19:42:57\",";
+ item_format = "\"item_format\": \"time\"";
+ specs.push_back("," + item_default + item_format);
+
+ item_format = "\"item_format\": \"date-time\"";
+ specs.push_back("," + item_format);
+ item_default = "";
+ item_format = "\"item_format\": \"date\"";
+ specs.push_back("," + item_format);
+ item_default = "";
+ item_format = "\"item_format\": \"time\"";
+ specs.push_back("," + item_format);
+
+ item_default = "\"item_default\": \"a\"";
+ specs.push_back("," + item_default);
+ item_default = "\"item_default\": \"b\"";
+ specs.push_back("," + item_default);
+ item_default = "\"item_default\": \"c\"";
+ specs.push_back("," + item_default);
+
+ item_format = "\"item_format\": \"dummy\"";
+ specs.push_back("," + item_format);
+
+ specs.push_back("");
+
+ BOOST_FOREACH(std::string s, specs) {
+ el = Element::fromJSON(json_begin + s + json_end)->get("module_spec");
+ EXPECT_NO_THROW(ModuleSpec(el, true));
+ }
+
+ specs.clear();
+ item_default = "\"item_default\": \"2011-05-27T19:42:57Z\",";
+ item_format = "\"item_format\": \"dummy\"";
+ specs.push_back("," + item_default + item_format);
+ item_default = "\"item_default\": \"2011-05-27\",";
+ item_format = "\"item_format\": \"dummy\"";
+ specs.push_back("," + item_default + item_format);
+ item_default = "\"item_default\": \"19:42:57Z\",";
+ item_format = "\"item_format\": \"dummy\"";
+ specs.push_back("," + item_default + item_format);
+
+ item_default = "\"item_default\": \"2011-13-99T99:99:99Z\",";
+ item_format = "\"item_format\": \"date-time\"";
+ specs.push_back("," + item_default + item_format);
+ item_default = "\"item_default\": \"2011-13-99\",";
+ item_format = "\"item_format\": \"date\"";
+ specs.push_back("," + item_default + item_format);
+ item_default = "\"item_default\": \"99:99:99Z\",";
+ item_format = "\"item_format\": \"time\"";
+ specs.push_back("," + item_default + item_format);
+
+ item_default = "\"item_default\": \"1\",";
+ item_format = "\"item_format\": \"date-time\"";
+ specs.push_back("," + item_default + item_format);
+ item_default = "\"item_default\": \"1\",";
+ item_format = "\"item_format\": \"date\"";
+ specs.push_back("," + item_default + item_format);
+ item_default = "\"item_default\": \"1\",";
+ item_format = "\"item_format\": \"time\"";
+ specs.push_back("," + item_default + item_format);
+
+ item_default = "\"item_default\": \"\",";
+ item_format = "\"item_format\": \"date-time\"";
+ specs.push_back("," + item_default + item_format);
+ item_default = "\"item_default\": \"\",";
+ item_format = "\"item_format\": \"date\"";
+ specs.push_back("," + item_default + item_format);
+ item_default = "\"item_default\": \"\",";
+ item_format = "\"item_format\": \"time\"";
+ specs.push_back("," + item_default + item_format);
+
+ // wrong date-time-type format not ending with "Z"
+ item_default = "\"item_default\": \"2011-05-27T19:42:57\",";
+ item_format = "\"item_format\": \"date-time\"";
+ specs.push_back("," + item_default + item_format);
+ // wrong date-type format ending with "T"
+ item_default = "\"item_default\": \"2011-05-27T\",";
+ item_format = "\"item_format\": \"date\"";
+ specs.push_back("," + item_default + item_format);
+ // wrong time-type format ending with "Z"
+ item_default = "\"item_default\": \"19:42:57Z\",";
+ item_format = "\"item_format\": \"time\"";
+ specs.push_back("," + item_default + item_format);
+
+ BOOST_FOREACH(std::string s, specs) {
+ el = Element::fromJSON(json_begin + s + json_end)->get("module_spec");
+ EXPECT_THROW(ModuleSpec(el, true), ModuleSpecError);
+ }
+}
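
For context, this is roughly how a module could use the new validateStatistics() call outside the test harness. A minimal sketch, assuming the ModuleSpec/Element API exercised above; the spec file path and the reported values are illustrative only:

    #include <config/module_spec.h>
    #include <cc/data.h>
    #include <iostream>

    using namespace isc::config;
    using namespace isc::data;

    int main() {
        // Load a spec that declares statistics items (path is illustrative)
        ModuleSpec spec = moduleSpecFromFile("spec33.spec");
        // Statistics data as a module would report it (same shape as data33_1.data)
        ConstElementPtr stats = Element::fromJSON(
            "{ \"dummy_str\": \"Dummy String\", \"dummy_int\": 118,"
            "  \"dummy_datetime\": \"2011-05-27T19:42:57Z\","
            "  \"dummy_date\": \"2011-05-27\", \"dummy_time\": \"19:42:57\" }");
        ElementPtr errors = Element::createList();
        if (!spec.validateStatistics(stats, true, errors)) {
            // On a format mismatch the errors list is filled, as in the test above
            std::cout << "invalid statistics: " << errors->str() << std::endl;
            return (1);
        }
        return (0);
    }
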
diff --git a/src/lib/config/tests/testdata/Makefile.am b/src/lib/config/tests/testdata/Makefile.am
index 91d7f04..0d8b92e 100644
--- a/src/lib/config/tests/testdata/Makefile.am
+++ b/src/lib/config/tests/testdata/Makefile.am
@@ -25,6 +25,8 @@ EXTRA_DIST += data22_10.data
EXTRA_DIST += data32_1.data
EXTRA_DIST += data32_2.data
EXTRA_DIST += data32_3.data
+EXTRA_DIST += data33_1.data
+EXTRA_DIST += data33_2.data
EXTRA_DIST += spec1.spec
EXTRA_DIST += spec2.spec
EXTRA_DIST += spec3.spec
@@ -57,3 +59,9 @@ EXTRA_DIST += spec29.spec
EXTRA_DIST += spec30.spec
EXTRA_DIST += spec31.spec
EXTRA_DIST += spec32.spec
+EXTRA_DIST += spec33.spec
+EXTRA_DIST += spec34.spec
+EXTRA_DIST += spec35.spec
+EXTRA_DIST += spec36.spec
+EXTRA_DIST += spec37.spec
+EXTRA_DIST += spec38.spec
diff --git a/src/lib/config/tests/testdata/data33_1.data b/src/lib/config/tests/testdata/data33_1.data
new file mode 100644
index 0000000..429852c
--- /dev/null
+++ b/src/lib/config/tests/testdata/data33_1.data
@@ -0,0 +1,7 @@
+{
+ "dummy_str": "Dummy String",
+ "dummy_int": 118,
+ "dummy_datetime": "2011-05-27T19:42:57Z",
+ "dummy_date": "2011-05-27",
+ "dummy_time": "19:42:57"
+}
diff --git a/src/lib/config/tests/testdata/data33_2.data b/src/lib/config/tests/testdata/data33_2.data
new file mode 100644
index 0000000..eb0615c
--- /dev/null
+++ b/src/lib/config/tests/testdata/data33_2.data
@@ -0,0 +1,7 @@
+{
+ "dummy_str": "Dummy String",
+ "dummy_int": 118,
+ "dummy_datetime": "xxxx",
+ "dummy_date": "xxxx",
+ "dummy_time": "xxxx"
+}
diff --git a/src/lib/config/tests/testdata/spec2.spec b/src/lib/config/tests/testdata/spec2.spec
index 59b8ebc..4352422 100644
--- a/src/lib/config/tests/testdata/spec2.spec
+++ b/src/lib/config/tests/testdata/spec2.spec
@@ -66,6 +66,17 @@
"command_description": "Shut down BIND 10",
"command_args": []
}
+ ],
+ "statistics": [
+ {
+ "item_name": "dummy_time",
+ "item_type": "string",
+ "item_optional": false,
+ "item_default": "1970-01-01T00:00:00Z",
+ "item_title": "Dummy Time",
+ "item_description": "A dummy date time",
+ "item_format": "date-time"
+ }
]
}
}
diff --git a/src/lib/config/tests/testdata/spec33.spec b/src/lib/config/tests/testdata/spec33.spec
new file mode 100644
index 0000000..3002488
--- /dev/null
+++ b/src/lib/config/tests/testdata/spec33.spec
@@ -0,0 +1,50 @@
+{
+ "module_spec": {
+ "module_name": "Spec33",
+ "statistics": [
+ {
+ "item_name": "dummy_str",
+ "item_type": "string",
+ "item_optional": false,
+ "item_default": "Dummy",
+ "item_title": "Dummy String",
+ "item_description": "A dummy string"
+ },
+ {
+ "item_name": "dummy_int",
+ "item_type": "integer",
+ "item_optional": false,
+ "item_default": 0,
+ "item_title": "Dummy Integer",
+ "item_description": "A dummy integer"
+ },
+ {
+ "item_name": "dummy_datetime",
+ "item_type": "string",
+ "item_optional": false,
+ "item_default": "1970-01-01T00:00:00Z",
+ "item_title": "Dummy DateTime",
+ "item_description": "A dummy datetime",
+ "item_format": "date-time"
+ },
+ {
+ "item_name": "dummy_date",
+ "item_type": "string",
+ "item_optional": false,
+ "item_default": "1970-01-01",
+ "item_title": "Dummy Date",
+ "item_description": "A dummy date",
+ "item_format": "date"
+ },
+ {
+ "item_name": "dummy_time",
+ "item_type": "string",
+ "item_optional": false,
+ "item_default": "00:00:00",
+ "item_title": "Dummy Time",
+ "item_description": "A dummy time",
+ "item_format": "time"
+ }
+ ]
+ }
+}
diff --git a/src/lib/config/tests/testdata/spec34.spec b/src/lib/config/tests/testdata/spec34.spec
new file mode 100644
index 0000000..dd1f3ca
--- /dev/null
+++ b/src/lib/config/tests/testdata/spec34.spec
@@ -0,0 +1,14 @@
+{
+ "module_spec": {
+ "module_name": "Spec34",
+ "statistics": [
+ {
+ "item_name": "dummy_str",
+ "item_type": "string",
+ "item_optional": false,
+ "item_default": "Dummy",
+ "item_description": "A dummy string"
+ }
+ ]
+ }
+}
diff --git a/src/lib/config/tests/testdata/spec35.spec b/src/lib/config/tests/testdata/spec35.spec
new file mode 100644
index 0000000..86aaf14
--- /dev/null
+++ b/src/lib/config/tests/testdata/spec35.spec
@@ -0,0 +1,15 @@
+{
+ "module_spec": {
+ "module_name": "Spec35",
+ "statistics": [
+ {
+ "item_name": "dummy_str",
+ "item_type": "string",
+ "item_optional": false,
+ "item_default": "Dummy",
+ "item_title": "Dummy String"
+ }
+ ]
+ }
+}
+
diff --git a/src/lib/config/tests/testdata/spec36.spec b/src/lib/config/tests/testdata/spec36.spec
new file mode 100644
index 0000000..fb9ce26
--- /dev/null
+++ b/src/lib/config/tests/testdata/spec36.spec
@@ -0,0 +1,17 @@
+{
+ "module_spec": {
+ "module_name": "Spec36",
+ "statistics": [
+ {
+ "item_name": "dummy_str",
+ "item_type": "string",
+ "item_optional": false,
+ "item_default": "Dummy",
+ "item_title": "Dummy String",
+ "item_description": "A dummy string",
+ "item_format": "dummy"
+ }
+ ]
+ }
+}
+
diff --git a/src/lib/config/tests/testdata/spec37.spec b/src/lib/config/tests/testdata/spec37.spec
new file mode 100644
index 0000000..bc444d1
--- /dev/null
+++ b/src/lib/config/tests/testdata/spec37.spec
@@ -0,0 +1,7 @@
+{
+ "module_spec": {
+ "module_name": "Spec37",
+ "statistics": 8
+ }
+}
+
diff --git a/src/lib/config/tests/testdata/spec38.spec b/src/lib/config/tests/testdata/spec38.spec
new file mode 100644
index 0000000..1892e88
--- /dev/null
+++ b/src/lib/config/tests/testdata/spec38.spec
@@ -0,0 +1,17 @@
+{
+ "module_spec": {
+ "module_name": "Spec38",
+ "statistics": [
+ {
+ "item_name": "dummy_datetime",
+ "item_type": "string",
+ "item_optional": false,
+ "item_default": "11",
+ "item_title": "Dummy DateTime",
+ "item_description": "A dummy datetime",
+ "item_format": "date-time"
+ }
+ ]
+ }
+}
+
diff --git a/src/lib/datasrc/Makefile.am b/src/lib/datasrc/Makefile.am
index 261baae..8f8a5ce 100644
--- a/src/lib/datasrc/Makefile.am
+++ b/src/lib/datasrc/Makefile.am
@@ -21,7 +21,9 @@ libdatasrc_la_SOURCES += memory_datasrc.h memory_datasrc.cc
libdatasrc_la_SOURCES += zone.h
libdatasrc_la_SOURCES += result.h
libdatasrc_la_SOURCES += logger.h logger.cc
-libdatasrc_la_SOURCES += client.h
+libdatasrc_la_SOURCES += client.h iterator.h
+libdatasrc_la_SOURCES += database.h database.cc
+libdatasrc_la_SOURCES += sqlite3_accessor.h sqlite3_accessor.cc
nodist_libdatasrc_la_SOURCES = datasrc_messages.h datasrc_messages.cc
libdatasrc_la_LIBADD = $(top_builddir)/src/lib/exceptions/libexceptions.la
diff --git a/src/lib/datasrc/client.h b/src/lib/datasrc/client.h
index a830f00..c43092d 100644
--- a/src/lib/datasrc/client.h
+++ b/src/lib/datasrc/client.h
@@ -15,11 +15,20 @@
#ifndef __DATA_SOURCE_CLIENT_H
#define __DATA_SOURCE_CLIENT_H 1
+#include <boost/noncopyable.hpp>
+#include <boost/shared_ptr.hpp>
+
+#include <exceptions/exceptions.h>
+
#include <datasrc/zone.h>
namespace isc {
namespace datasrc {
+// The iterator.h is not included on purpose; most applications won't need it
+class ZoneIterator;
+typedef boost::shared_ptr<ZoneIterator> ZoneIteratorPtr;
+
/// \brief The base class of data source clients.
///
/// This is an abstract base class that defines the common interface for
@@ -141,6 +150,36 @@ public:
/// \param name A domain name for which the search is performed.
/// \return A \c FindResult object enclosing the search result (see above).
virtual FindResult findZone(const isc::dns::Name& name) const = 0;
+
+ /// \brief Returns an iterator to the given zone
+ ///
+ /// This allows for traversing the whole zone. The returned object can
+ /// provide the RRsets one by one.
+ ///
+ /// This throws DataSourceError when the zone does not exist in the
+ /// datasource.
+ ///
+ /// The default implementation throws isc::NotImplemented. This allows
+ /// for easy and fast deployment of minimal custom data sources, where
+    /// the user/implementer doesn't have to care about anything else but
+ /// the actual queries. Also, in some cases, it isn't possible to traverse
+    /// the zone from a logical point of view (e.g. dynamically generated zone
+ /// data).
+ ///
+    /// It is not specified whether a concrete implementation of this method
+    /// may throw anything else.
+ ///
+    /// \param name The name of the zone apex to be traversed. Unlike
+    /// findZone(), it does not do a nearest (partial) match.
+ /// \return Pointer to the iterator.
+ virtual ZoneIteratorPtr getIterator(const isc::dns::Name& name) const {
+ // This is here to both document the parameter in doxygen (therefore it
+ // needs a name) and avoid unused parameter warning.
+ static_cast<void>(name);
+
+ isc_throw(isc::NotImplemented,
+ "Data source doesn't support iteration");
+ }
};
}
}
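
To illustrate how the new interface is meant to be used from application code, here is a minimal sketch that walks a zone through the generic client API; the dumpZone() helper and the zone name are purely illustrative, and any concrete DataSourceClient that supports iteration (such as the DatabaseClient added below) could be passed in:

    #include <datasrc/client.h>
    #include <datasrc/iterator.h>
    #include <dns/name.h>
    #include <dns/rrset.h>
    #include <iostream>

    using namespace isc::datasrc;
    using namespace isc::dns;

    // Print every RRset of the zone at 'origin' held by 'client'.
    // getIterator() throws DataSourceError if the zone doesn't exist and
    // isc::NotImplemented if the data source can't iterate at all.
    void dumpZone(const DataSourceClient& client, const Name& origin) {
        ZoneIteratorPtr iter = client.getIterator(origin);
        for (ConstRRsetPtr rrset = iter->getNextRRset(); rrset;
             rrset = iter->getNextRRset()) {
            std::cout << rrset->toText();
        }
    }
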
diff --git a/src/lib/datasrc/database.cc b/src/lib/datasrc/database.cc
new file mode 100644
index 0000000..f0ac192
--- /dev/null
+++ b/src/lib/datasrc/database.cc
@@ -0,0 +1,501 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <vector>
+
+#include <datasrc/database.h>
+#include <datasrc/data_source.h>
+#include <datasrc/iterator.h>
+
+#include <exceptions/exceptions.h>
+#include <dns/name.h>
+#include <dns/rrclass.h>
+#include <dns/rdata.h>
+#include <dns/rdataclass.h>
+
+#include <datasrc/data_source.h>
+#include <datasrc/logger.h>
+
+#include <boost/foreach.hpp>
+
+#include <string>
+
+using namespace isc::dns;
+using std::string;
+
+namespace isc {
+namespace datasrc {
+
+DatabaseClient::DatabaseClient(boost::shared_ptr<DatabaseAccessor>
+ database) :
+ database_(database)
+{
+ if (database_.get() == NULL) {
+ isc_throw(isc::InvalidParameter,
+ "No database provided to DatabaseClient");
+ }
+}
+
+DataSourceClient::FindResult
+DatabaseClient::findZone(const Name& name) const {
+ std::pair<bool, int> zone(database_->getZone(name));
+ // Try exact first
+ if (zone.first) {
+ return (FindResult(result::SUCCESS,
+ ZoneFinderPtr(new Finder(database_,
+ zone.second, name))));
+ }
+ // Then super domains
+ // Start from 1, as 0 is covered above
+ for (size_t i(1); i < name.getLabelCount(); ++i) {
+ isc::dns::Name superdomain(name.split(i));
+ zone = database_->getZone(superdomain);
+ if (zone.first) {
+ return (FindResult(result::PARTIALMATCH,
+ ZoneFinderPtr(new Finder(database_,
+ zone.second,
+ superdomain))));
+ }
+ }
+ // No, really nothing
+ return (FindResult(result::NOTFOUND, ZoneFinderPtr()));
+}
+
+DatabaseClient::Finder::Finder(boost::shared_ptr<DatabaseAccessor>
+ database, int zone_id,
+ const isc::dns::Name& origin) :
+ database_(database),
+ zone_id_(zone_id),
+ origin_(origin)
+{ }
+
+namespace {
+// Adds the given Rdata to the given RRset
+// If the rrset is an empty pointer, a new one is
+// created with the given name, class, type and TTL.
+// If the rrset already exists, its type is checked,
+// but its name is not.
+//
+// Then adds the given rdata to the set
+//
+// Raises a DataSourceError if the type does not
+// match, or if the given rdata string does not
+// parse correctly for the given type and class
+//
+// The DatabaseAccessor is passed to print the
+// database name in the log message if the TTL is
+// modified
+void addOrCreate(isc::dns::RRsetPtr& rrset,
+ const isc::dns::Name& name,
+ const isc::dns::RRClass& cls,
+ const isc::dns::RRType& type,
+ const isc::dns::RRTTL& ttl,
+ const std::string& rdata_str,
+ const DatabaseAccessor& db
+ )
+{
+ if (!rrset) {
+ rrset.reset(new isc::dns::RRset(name, cls, type, ttl));
+ } else {
+ // This is a check to make sure find() is not messing things up
+ assert(type == rrset->getType());
+ if (ttl != rrset->getTTL()) {
+ if (ttl < rrset->getTTL()) {
+ rrset->setTTL(ttl);
+ }
+ logger.warn(DATASRC_DATABASE_FIND_TTL_MISMATCH)
+ .arg(db.getDBName()).arg(name).arg(cls)
+ .arg(type).arg(rrset->getTTL());
+ }
+ }
+ try {
+ rrset->addRdata(isc::dns::rdata::createRdata(type, cls, rdata_str));
+ } catch (const isc::dns::rdata::InvalidRdataText& ivrt) {
+ // at this point, rrset may have been initialised for no reason,
+ // and won't be used. But the caller would drop the shared_ptr
+ // on such an error anyway, so we don't care.
+ isc_throw(DataSourceError,
+ "bad rdata in database for " << name << " "
+ << type << ": " << ivrt.what());
+ }
+}
+
+// This class keeps a short-lived store of RRSIG records encountered
+// during a call to find(). If the backend happens to return signatures
+// before the actual data, we might not know which signatures we will need,
+// so if they may be relevant, we store them in this class.
+//
+// (If this class seems useful in other places, we might want to move
+// it to util. That would also provide an opportunity to add unit tests)
+class RRsigStore {
+public:
+ // Adds the given signature Rdata to the store
+ // The signature rdata MUST be of the RRSIG rdata type
+ // (the caller must make sure of this).
+ // NOTE: if we move this class to a public namespace,
+ // we should add a type_covered argument, so as not
+ // to have to do this cast here.
+ void addSig(isc::dns::rdata::RdataPtr sig_rdata) {
+ const isc::dns::RRType& type_covered =
+ static_cast<isc::dns::rdata::generic::RRSIG*>(
+ sig_rdata.get())->typeCovered();
+ sigs[type_covered].push_back(sig_rdata);
+ }
+
+ // If the store contains signatures for the type of the given
+ // rrset, they are appended to it.
+ void appendSignatures(isc::dns::RRsetPtr& rrset) const {
+ std::map<isc::dns::RRType,
+ std::vector<isc::dns::rdata::RdataPtr> >::const_iterator
+ found = sigs.find(rrset->getType());
+ if (found != sigs.end()) {
+ BOOST_FOREACH(isc::dns::rdata::RdataPtr sig, found->second) {
+ rrset->addRRsig(sig);
+ }
+ }
+ }
+
+private:
+ std::map<isc::dns::RRType, std::vector<isc::dns::rdata::RdataPtr> > sigs;
+};
+}
+
+std::pair<bool, isc::dns::RRsetPtr>
+DatabaseClient::Finder::getRRset(const isc::dns::Name& name,
+ const isc::dns::RRType* type,
+ bool want_cname, bool want_dname,
+ bool want_ns)
+{
+ RRsigStore sig_store;
+ bool records_found = false;
+ isc::dns::RRsetPtr result_rrset;
+
+ // Request the context
+ DatabaseAccessor::IteratorContextPtr
+ context(database_->getRecords(name.toText(), zone_id_));
+    // It must not return NULL; that would be a bug in the implementation
+ if (!context) {
+ isc_throw(isc::Unexpected, "Iterator context null at " +
+ name.toText());
+ }
+
+ std::string columns[DatabaseAccessor::COLUMN_COUNT];
+ while (context->getNext(columns)) {
+ if (!records_found) {
+ records_found = true;
+ }
+
+ try {
+ const isc::dns::RRType cur_type(columns[DatabaseAccessor::
+ TYPE_COLUMN]);
+ const isc::dns::RRTTL cur_ttl(columns[DatabaseAccessor::
+ TTL_COLUMN]);
+            // The sigtype column was an optimization for finding the
+ // relevant RRSIG RRs for a lookup. Currently this column is
+ // not used in this revised datasource implementation. We
+ // should either start using it again, or remove it from use
+ // completely (i.e. also remove it from the schema and the
+ // backend implementation).
+ // Note that because we don't use it now, we also won't notice
+ // it if the value is wrong (i.e. if the sigtype column
+ // contains an rrtype that is different from the actual value
+ // of the 'type covered' field in the RRSIG Rdata).
+ //cur_sigtype(columns[SIGTYPE_COLUMN]);
+
+ // Check for delegations before checking for the right type.
+ // This is needed to properly delegate request for the NS
+ // record itself.
+ //
+ // This happens with NS only, CNAME must be alone and DNAME
+ // is not checked in the exact queried domain.
+ if (want_ns && cur_type == isc::dns::RRType::NS()) {
+ if (result_rrset &&
+ result_rrset->getType() != isc::dns::RRType::NS()) {
+ isc_throw(DataSourceError, "NS found together with data"
+ " in non-apex domain " + name.toText());
+ }
+ addOrCreate(result_rrset, name, getClass(), cur_type, cur_ttl,
+ columns[DatabaseAccessor::RDATA_COLUMN],
+ *database_);
+ } else if (type != NULL && cur_type == *type) {
+ if (result_rrset &&
+ result_rrset->getType() == isc::dns::RRType::CNAME()) {
+ isc_throw(DataSourceError, "CNAME found but it is not "
+ "the only record for " + name.toText());
+ } else if (result_rrset && want_ns &&
+ result_rrset->getType() == isc::dns::RRType::NS()) {
+ isc_throw(DataSourceError, "NS found together with data"
+ " in non-apex domain " + name.toText());
+ }
+ addOrCreate(result_rrset, name, getClass(), cur_type, cur_ttl,
+ columns[DatabaseAccessor::RDATA_COLUMN],
+ *database_);
+ } else if (want_cname && cur_type == isc::dns::RRType::CNAME()) {
+ // There should be no other data, so result_rrset should
+ // be empty.
+ if (result_rrset) {
+ isc_throw(DataSourceError, "CNAME found but it is not "
+ "the only record for " + name.toText());
+ }
+ addOrCreate(result_rrset, name, getClass(), cur_type, cur_ttl,
+ columns[DatabaseAccessor::RDATA_COLUMN],
+ *database_);
+ } else if (want_dname && cur_type == isc::dns::RRType::DNAME()) {
+ // There should be max one RR of DNAME present
+ if (result_rrset &&
+ result_rrset->getType() == isc::dns::RRType::DNAME()) {
+ isc_throw(DataSourceError, "DNAME with multiple RRs in " +
+ name.toText());
+ }
+ addOrCreate(result_rrset, name, getClass(), cur_type, cur_ttl,
+ columns[DatabaseAccessor::RDATA_COLUMN],
+ *database_);
+ } else if (cur_type == isc::dns::RRType::RRSIG()) {
+ // If we get signatures before we get the actual data, we
+ // can't know which ones to keep and which to drop...
+ // So we keep a separate store of any signature that may be
+ // relevant and add them to the final RRset when we are
+ // done.
+ // A possible optimization here is to not store them for
+ // types we are certain we don't need
+ sig_store.addSig(isc::dns::rdata::createRdata(cur_type,
+ getClass(), columns[DatabaseAccessor::RDATA_COLUMN]));
+ }
+ } catch (const isc::dns::InvalidRRType& irt) {
+ isc_throw(DataSourceError, "Invalid RRType in database for " <<
+ name << ": " << columns[DatabaseAccessor::
+ TYPE_COLUMN]);
+ } catch (const isc::dns::InvalidRRTTL& irttl) {
+ isc_throw(DataSourceError, "Invalid TTL in database for " <<
+ name << ": " << columns[DatabaseAccessor::
+ TTL_COLUMN]);
+ } catch (const isc::dns::rdata::InvalidRdataText& ird) {
+ isc_throw(DataSourceError, "Invalid rdata in database for " <<
+ name << ": " << columns[DatabaseAccessor::
+ RDATA_COLUMN]);
+ }
+ }
+ if (result_rrset) {
+ sig_store.appendSignatures(result_rrset);
+ }
+ return (std::pair<bool, isc::dns::RRsetPtr>(records_found, result_rrset));
+}
+
+ZoneFinder::FindResult
+DatabaseClient::Finder::find(const isc::dns::Name& name,
+ const isc::dns::RRType& type,
+ isc::dns::RRsetList*,
+ const FindOptions options)
+{
+ // This variable is used to determine the difference between
+ // NXDOMAIN and NXRRSET
+ bool records_found = false;
+ bool glue_ok(options & FIND_GLUE_OK);
+ isc::dns::RRsetPtr result_rrset;
+ ZoneFinder::Result result_status = SUCCESS;
+ std::pair<bool, isc::dns::RRsetPtr> found;
+ logger.debug(DBG_TRACE_DETAILED, DATASRC_DATABASE_FIND_RECORDS)
+ .arg(database_->getDBName()).arg(name).arg(type);
+
+ // First, do we have any kind of delegation (NS/DNAME) here?
+ const Name origin(getOrigin());
+ const size_t origin_label_count(origin.getLabelCount());
+ const size_t current_label_count(name.getLabelCount());
+ // This is how many labels we remove to get origin
+ const size_t remove_labels(current_label_count - origin_label_count);
+
+    // Now go through all superdomains from the origin down
+ for (int i(remove_labels); i > 0; --i) {
+ const Name superdomain(name.split(i));
+ // Look if there's NS or DNAME (but ignore the NS in origin)
+ found = getRRset(superdomain, NULL, false, true,
+ i != remove_labels && !glue_ok);
+ if (found.second) {
+ // We found something redirecting somewhere else
+ // (it can be only NS or DNAME here)
+ result_rrset = found.second;
+ if (result_rrset->getType() == isc::dns::RRType::NS()) {
+ LOG_DEBUG(logger, DBG_TRACE_DETAILED,
+ DATASRC_DATABASE_FOUND_DELEGATION).
+ arg(database_->getDBName()).arg(superdomain);
+ result_status = DELEGATION;
+ } else {
+ LOG_DEBUG(logger, DBG_TRACE_DETAILED,
+ DATASRC_DATABASE_FOUND_DNAME).
+ arg(database_->getDBName()).arg(superdomain);
+ result_status = DNAME;
+ }
+ // Don't search more
+ break;
+ }
+ }
+
+ if (!result_rrset) { // Only if we didn't find a redirect already
+ // Try getting the final result and extract it
+ // It is special if there's a CNAME or NS, DNAME is ignored here
+ // And we don't consider the NS in origin
+ found = getRRset(name, &type, true, false, name != origin && !glue_ok);
+ records_found = found.first;
+ result_rrset = found.second;
+ if (result_rrset && name != origin && !glue_ok &&
+ result_rrset->getType() == isc::dns::RRType::NS()) {
+ LOG_DEBUG(logger, DBG_TRACE_DETAILED,
+ DATASRC_DATABASE_FOUND_DELEGATION_EXACT).
+ arg(database_->getDBName()).arg(name);
+ result_status = DELEGATION;
+ } else if (result_rrset && type != isc::dns::RRType::CNAME() &&
+ result_rrset->getType() == isc::dns::RRType::CNAME()) {
+ result_status = CNAME;
+ }
+ }
+
+ if (!result_rrset) {
+ if (records_found) {
+ logger.debug(DBG_TRACE_DETAILED,
+ DATASRC_DATABASE_FOUND_NXRRSET)
+ .arg(database_->getDBName()).arg(name)
+ .arg(getClass()).arg(type);
+ result_status = NXRRSET;
+ } else {
+ logger.debug(DBG_TRACE_DETAILED,
+ DATASRC_DATABASE_FOUND_NXDOMAIN)
+ .arg(database_->getDBName()).arg(name)
+ .arg(getClass()).arg(type);
+ result_status = NXDOMAIN;
+ }
+ } else {
+ logger.debug(DBG_TRACE_DETAILED,
+ DATASRC_DATABASE_FOUND_RRSET)
+ .arg(database_->getDBName()).arg(*result_rrset);
+ }
+ return (FindResult(result_status, result_rrset));
+}
+
+Name
+DatabaseClient::Finder::getOrigin() const {
+ return (origin_);
+}
+
+isc::dns::RRClass
+DatabaseClient::Finder::getClass() const {
+ // TODO Implement
+ return isc::dns::RRClass::IN();
+}
+
+namespace {
+
+/*
+ * Besides converting all data from its textual representation, this iterator
+ * needs to group together rdata belonging to the same RRset. To do this, we
+ * hold one row of data ahead of the iteration. When we get a request to
+ * provide data, we build the RRset from that row and load a new one. If the
+ * new row belongs to the same RRset, we add it. Otherwise we return what we
+ * have and keep the row as the one ahead for next time.
+ */
+class DatabaseIterator : public ZoneIterator {
+public:
+ DatabaseIterator(const DatabaseAccessor::IteratorContextPtr& context,
+ const RRClass& rrclass) :
+ context_(context),
+ class_(rrclass),
+ ready_(true)
+ {
+ // Prepare data for the next time
+ getData();
+ }
+
+ virtual isc::dns::ConstRRsetPtr getNextRRset() {
+ if (!ready_) {
+ isc_throw(isc::Unexpected, "Iterating past the zone end");
+ }
+ if (!data_ready_) {
+ // At the end of zone
+ ready_ = false;
+ LOG_DEBUG(logger, DBG_TRACE_DETAILED,
+ DATASRC_DATABASE_ITERATE_END);
+ return (ConstRRsetPtr());
+ }
+ string name_str(name_), rtype_str(rtype_), ttl(ttl_);
+ Name name(name_str);
+ RRType rtype(rtype_str);
+ RRsetPtr rrset(new RRset(name, class_, rtype, RRTTL(ttl)));
+ while (data_ready_ && name_ == name_str && rtype_str == rtype_) {
+ if (ttl_ != ttl) {
+ if (ttl < ttl_) {
+ ttl_ = ttl;
+ rrset->setTTL(RRTTL(ttl));
+ }
+ LOG_WARN(logger, DATASRC_DATABASE_ITERATE_TTL_MISMATCH).
+ arg(name_).arg(class_).arg(rtype_).arg(rrset->getTTL());
+ }
+ rrset->addRdata(rdata::createRdata(rtype, class_, rdata_));
+ getData();
+ }
+ LOG_DEBUG(logger, DBG_TRACE_DETAILED, DATASRC_DATABASE_ITERATE_NEXT).
+ arg(rrset->getName()).arg(rrset->getType());
+ return (rrset);
+ }
+private:
+ // Load next row of data
+ void getData() {
+ string data[DatabaseAccessor::COLUMN_COUNT];
+ data_ready_ = context_->getNext(data);
+ name_ = data[DatabaseAccessor::NAME_COLUMN];
+ rtype_ = data[DatabaseAccessor::TYPE_COLUMN];
+ ttl_ = data[DatabaseAccessor::TTL_COLUMN];
+ rdata_ = data[DatabaseAccessor::RDATA_COLUMN];
+ }
+
+ // The context
+ const DatabaseAccessor::IteratorContextPtr context_;
+ // Class of the zone
+ RRClass class_;
+ // Status
+ bool ready_, data_ready_;
+ // Data of the next row
+ string name_, rtype_, rdata_, ttl_;
+};
+
+}
+
+ZoneIteratorPtr
+DatabaseClient::getIterator(const isc::dns::Name& name) const {
+ // Get the zone
+ std::pair<bool, int> zone(database_->getZone(name));
+ if (!zone.first) {
+ // No such zone, can't continue
+ isc_throw(DataSourceError, "Zone " + name.toText() +
+ " can not be iterated, because it doesn't exist "
+ "in this data source");
+ }
+ // Request the context
+ DatabaseAccessor::IteratorContextPtr
+ context(database_->getAllRecords(zone.second));
+    // It must not return NULL; that would be a bug in the implementation
+ if (context == DatabaseAccessor::IteratorContextPtr()) {
+ isc_throw(isc::Unexpected, "Iterator context null at " +
+ name.toText());
+ }
+ // Create the iterator and return it
+ // TODO: Once #1062 is merged with this, we need to get the
+ // actual zone class from the connection, as the DatabaseClient
+ // doesn't know it and the iterator needs it (so it wouldn't query
+ // it each time)
+ LOG_DEBUG(logger, DBG_TRACE_DETAILED, DATASRC_DATABASE_ITERATE).
+ arg(name);
+ return (ZoneIteratorPtr(new DatabaseIterator(context, RRClass::IN())));
+}
+
+}
+}
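
For orientation, the status codes set above map onto the caller side roughly like this. This is a hedged sketch assuming the FindResult layout (public code and rrset members) of the pre-existing ZoneFinder interface in zone.h, which is not part of this diff:

    #include <datasrc/zone.h>
    #include <dns/name.h>
    #include <dns/rrtype.h>
    #include <cstddef>

    using namespace isc::datasrc;
    using namespace isc::dns;

    // Interpret the status codes the database Finder can produce.
    void handle(ZoneFinder& finder, const Name& qname, const RRType& qtype) {
        const ZoneFinder::FindResult result =
            finder.find(qname, qtype, NULL, ZoneFinder::FIND_DEFAULT);
        switch (result.code) {
        case ZoneFinder::SUCCESS:      // result.rrset is the answer
        case ZoneFinder::CNAME:        // result.rrset is a CNAME to follow
        case ZoneFinder::DELEGATION:   // result.rrset is the delegating NS set
        case ZoneFinder::DNAME:        // result.rrset is a DNAME to synthesize from
            // use result.rrset here
            break;
        case ZoneFinder::NXRRSET:      // the name exists, but not this type
        case ZoneFinder::NXDOMAIN:     // the name does not exist at all
        default:
            break;
        }
    }
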
diff --git a/src/lib/datasrc/database.h b/src/lib/datasrc/database.h
new file mode 100644
index 0000000..d7785e6
--- /dev/null
+++ b/src/lib/datasrc/database.h
@@ -0,0 +1,430 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __DATABASE_DATASRC_H
+#define __DATABASE_DATASRC_H
+
+#include <datasrc/client.h>
+
+#include <dns/name.h>
+#include <exceptions/exceptions.h>
+
+namespace isc {
+namespace datasrc {
+
+/**
+ * \brief Abstraction of lowlevel database with DNS data
+ *
+ * This class defines the interface to databases. Each supported database
+ * will provide methods for accessing the data stored there in a generic
+ * manner. The methods are meant to be low-level, without much or any knowledge
+ * about DNS, and it should be possible to translate them directly to queries.
+ *
+ * On the other hand, how the communication with the database is done, and in
+ * what schema (in case of a relational/SQL database), is up to the concrete
+ * classes.
+ *
+ * This class is non-copyable, as copying connections to a database makes
+ * little sense and will not be needed.
+ *
+ * \todo Is it true this does not need to be copied? For example the zone
+ * iterator might need its own copy. But a virtual clone() method might
+ * be better for that than a copy constructor.
+ *
+ * \note The same application may create multiple connections to the same
+ * database, having multiple instances of this class. If the database
+ * allows having multiple open queries at one connection, the connection
+ * class may share it.
+ */
+class DatabaseAccessor : boost::noncopyable {
+public:
+ /**
+ * Definitions of the fields as they are required to be filled in
+ * by IteratorContext::getNext()
+ *
+ * When implementing getNext(), the columns array should
+ * be filled with the values as described in this enumeration,
+ * in this order, i.e. TYPE_COLUMN should be the first element
+ * (index 0) of the array, TTL_COLUMN should be the second element
+ * (index 1), etc.
+ */
+ enum RecordColumns {
+ TYPE_COLUMN = 0, ///< The RRType of the record (A/NS/TXT etc.)
+        TTL_COLUMN = 1,     ///< The TTL of the record (a numeric value)
+ SIGTYPE_COLUMN = 2, ///< For RRSIG records, this contains the RRTYPE
+ ///< the RRSIG covers. In the current implementation,
+ ///< this field is ignored.
+ RDATA_COLUMN = 3, ///< Full text representation of the record's RDATA
+ NAME_COLUMN = 4, ///< The domain name of this RR
+ COLUMN_COUNT = 5 ///< The total number of columns, MUST be value of
+ ///< the largest other element in this enum plus 1.
+ };
+
+ /**
+ * \brief Destructor
+ *
+     * It is empty, but needs to be virtual, since we will use the derived
+     * classes in a polymorphic way.
+ */
+ virtual ~DatabaseAccessor() { }
+ /**
+ * \brief Retrieve a zone identifier
+ *
+ * This method looks up a zone for the given name in the database. It
+     * should match only the exact zone name (i.e. the name is equal to the
+     * zone's apex), as the DatabaseClient will loop through the labels itself
+     * and find the most suitable zone.
+ *
+     * It is not specified if and what an implementation of this method may
+     * throw, so the caller should expect anything.
+ *
+ * \param name The name of the zone's apex to be looked up.
+ * \return The first part of the result indicates if a matching zone
+ * was found. In case it was, the second part is internal zone ID.
+ * This one will be passed to methods finding data in the zone.
+     *     The implementation is not required to keep these IDs in any
+     *     particular form - the ID is only passed back to the database as
+     *     an opaque handle.
+ */
+ virtual std::pair<bool, int> getZone(const isc::dns::Name& name) const = 0;
+
+ /**
+ * \brief This holds the internal context of ZoneIterator for databases
+ *
+ * While the ZoneIterator implementation from DatabaseClient does all the
+ * translation from strings to DNS classes and validation, this class
+ * holds the pointer to where the database is at reading the data.
+ *
+     * It can either hold a shared pointer to the accessor which created it
+     * and have some kind of statement inside (in case a single database
+     * connection can handle multiple concurrent SQL statements), or it can
+     * create a new connection (or, if it is more convenient, the accessor
+     * itself can inherit from both DatabaseAccessor and IteratorContext
+     * and just clone itself).
+ */
+ class IteratorContext : public boost::noncopyable {
+ public:
+ /**
+ * \brief Destructor
+ *
+         * Virtual destructor, so any descendant class is destroyed correctly.
+ */
+ virtual ~IteratorContext() { }
+
+ /**
+ * \brief Function to provide next resource record
+ *
+ * This function should provide data about the next resource record
+ * from the data that is searched. The data is not converted yet.
+ *
+ * Depending on how the iterator was constructed, there is a difference
+ * in behaviour; for a 'full zone iterator', created with
+ * getAllRecords(), all COLUMN_COUNT elements of the array are
+ * overwritten.
+ * For a 'name iterator', created with getRecords(), the column
+ * NAME_COLUMN is untouched, since what would be added here is by
+ * definition already known to the caller (it already passes it as
+ * an argument to getRecords()).
+ *
+         * \note The order of RRs is not strictly set, but the RRs of a single
+         * RRset must not be interleaved with any other RRs (i.e. RRsets must
+         * be "together").
+ *
+ * \param columns The data will be returned through here. The order
+ * is specified by the RecordColumns enum, and the size must be
+ * COLUMN_COUNT
+ * \todo Do we consider databases where it is stored in binary blob
+ * format?
+ * \throw DataSourceError if there's database-related error. If the
+ * exception (or any other in case of derived class) is thrown,
+ * the iterator can't be safely used any more.
+ * \return true if a record was found, and the columns array was
+ * updated. false if there was no more data, in which case
+ * the columns array is untouched.
+ */
+ virtual bool getNext(std::string (&columns)[COLUMN_COUNT]) = 0;
+ };
+
+ typedef boost::shared_ptr<IteratorContext> IteratorContextPtr;
+
+ /**
+ * \brief Creates an iterator context for a specific name.
+ *
+ * Returns an IteratorContextPtr that contains all records of the
+ * given name from the given zone.
+ *
+ * The implementation of the iterator that is returned may leave the
+ * NAME_COLUMN column of the array passed to getNext() untouched, as that
+ * data is already known (it is the same as the name argument here)
+ *
+ * \exception any Since any implementation can be used, the caller should
+ * expect any exception to be thrown.
+ *
+ * \param name The name to search for. This should be a FQDN.
+ * \param id The ID of the zone, returned from getZone().
+ * \return Newly created iterator context. Must not be NULL.
+ */
+ virtual IteratorContextPtr getRecords(const std::string& name,
+ int id) const = 0;
+
+ /**
+ * \brief Creates an iterator context for the whole zone.
+ *
+ * Returns an IteratorContextPtr that contains all records of the
+ * zone with the given zone id.
+ *
+ * Each call to getNext() on the returned iterator should copy all
+ * column fields of the array that is passed, as defined in the
+ * RecordColumns enum.
+ *
+ * \exception any Since any implementation can be used, the caller should
+ * expect any exception to be thrown.
+ *
+ * \param id The ID of the zone, returned from getZone().
+ * \return Newly created iterator context. Must not be NULL.
+ */
+ virtual IteratorContextPtr getAllRecords(int id) const = 0;
+
+ /**
+     * \brief Returns a string identifying this database backend
+ *
+ * The returned string is mainly intended to be used for
+ * debugging/logging purposes.
+ *
+ * Any implementation is free to choose the exact string content,
+ * but it is advisable to make it a name that is distinguishable
+ * from the others.
+ *
+ * \return the name of the database
+ */
+ virtual const std::string& getDBName() const = 0;
+};
+
+/**
+ * \brief Concrete data source client oriented at database backends.
+ *
+ * This class (together with corresponding versions of ZoneFinder,
+ * ZoneIterator, etc.) translates high-level data source queries to
+ * low-level calls on DatabaseAccessor. It issues multiple queries
+ * if necessary and validates data from the database, allowing the
+ * DatabaseAccessor to be just a simple translation layer to SQL or
+ * other database queries.
+ *
+ * While it is possible to subclass it for a specific database in case
+ * of special needs, this is not expected to be necessary. It should
+ * just work as it is with any DatabaseAccessor.
+ */
+class DatabaseClient : public DataSourceClient {
+public:
+ /**
+ * \brief Constructor
+ *
+ * It initializes the client with a database.
+ *
+ * \exception isc::InvalidParameter if database is NULL. It might throw
+ * standard allocation exception as well, but doesn't throw anything else.
+ *
+ * \param database The database to use to get data. As the parameter
+ * suggests, the client takes ownership of the database and will
+     * delete it when it is itself destroyed.
+ */
+ DatabaseClient(boost::shared_ptr<DatabaseAccessor> database);
+ /**
+ * \brief Corresponding ZoneFinder implementation
+ *
+     * The zone finder implementation for database data sources. Similar to
+     * the DatabaseClient, it translates the queries to methods of the
+     * database.
+     *
+     * Applications should not come into direct contact with this class
+     * (they should handle it through a generic ZoneFinder pointer), so it
+     * could be completely hidden in the .cc file. It is provided here to
+     * allow testing and for the rare cases when a database needs slightly
+     * different handling, in which case it can be subclassed.
+     *
+     * Methods directly correspond to the ones in ZoneFinder.
+ */
+ class Finder : public ZoneFinder {
+ public:
+ /**
+ * \brief Constructor
+ *
+ * \param database The database (shared with DatabaseClient) to
+ * be used for queries (the one asked for ID before).
+ * \param zone_id The zone ID which was returned from
+ * DatabaseAccessor::getZone and which will be passed to further
+ * calls to the database.
+         * \param origin The name of the origin of this zone. It could be
+         *     queried from the database, but as the DatabaseClient has just
+         *     searched for the zone by this name, it already has it.
+ */
+ Finder(boost::shared_ptr<DatabaseAccessor> database, int zone_id,
+ const isc::dns::Name& origin);
+ // The following three methods are just implementations of inherited
+ // ZoneFinder's pure virtual methods.
+ virtual isc::dns::Name getOrigin() const;
+ virtual isc::dns::RRClass getClass() const;
+
+ /**
+ * \brief Find an RRset in the datasource
+ *
+ * Searches the datasource for an RRset of the given name and
+ * type. If there is a CNAME at the given name, the CNAME rrset
+ * is returned.
+ * (this implementation is not complete, and currently only
+ * does full matches, CNAMES, and the signatures for matches and
+ * CNAMEs)
+ * \note target was used in the original design to handle ANY
+ * queries. This is not implemented yet, and may use
+ * target again for that, but it might also use something
+ * different. It is left in for compatibility at the moment.
+ * \note options are ignored at this moment
+ *
+ * \note Maybe counter intuitively, this method is not a const member
+ * function. This is intentional; some of the underlying implementations
+ * are expected to use a database backend, and would internally contain
+ * some abstraction of "database connection". In the most strict sense
+ * any (even read only) operation might change the internal state of
+ * such a connection, and in that sense the operation cannot be considered
+ * "const". In order to avoid giving a false sense of safety to the
+ * caller, we indicate a call to this method may have a surprising
+ * side effect. That said, this view may be too strict and it may
+ * make sense to say the internal database connection doesn't affect
+ * external behavior in terms of the interface of this method. As
+ * we gain more experiences with various kinds of backends we may
+ * revisit the constness.
+ *
+ * \exception DataSourceError when there is a problem reading
+         *                            the data from the database backend.
+ * This can be a connection, code, or
+ * data (parse) error.
+ *
+ * \param name The name to find
+ * \param type The RRType to find
+ * \param target Unused at this moment
+ * \param options Options about how to search.
+ * See ZoneFinder::FindOptions.
+ */
+ virtual FindResult find(const isc::dns::Name& name,
+ const isc::dns::RRType& type,
+ isc::dns::RRsetList* target = NULL,
+ const FindOptions options = FIND_DEFAULT);
+
+ /**
+ * \brief The zone ID
+ *
+ * This function provides the stored zone ID as passed to the
+ * constructor. This is meant for testing purposes and normal
+ * applications shouldn't need it.
+ */
+ int zone_id() const { return (zone_id_); }
+ /**
+ * \brief The database.
+ *
+ * This function provides the database stored inside as
+ * passed to the constructor. This is meant for testing purposes and
+ * normal applications shouldn't need it.
+ */
+ const DatabaseAccessor& database() const {
+ return (*database_);
+ }
+ private:
+ boost::shared_ptr<DatabaseAccessor> database_;
+ const int zone_id_;
+ const isc::dns::Name origin_;
+ /**
+ * \brief Searches database for an RRset
+ *
+ * This method scans RRs of single domain specified by name and finds
+ * RRset with given type or any of redirection RRsets that are
+ * requested.
+ *
+ * This function is used internally by find(), because this part is
+ * called multiple times with slightly different parameters.
+ *
+ * \param name Which domain name should be scanned.
+ * \param type The RRType which is requested. This can be NULL, in
+ * which case the method will look for the redirections only.
+ * \param want_cname If this is true, CNAME redirection may be returned
+ * instead of the RRset with given type. If there's CNAME and
+ * something else or the CNAME has multiple RRs, it throws
+ * DataSourceError.
+ * \param want_dname If this is true, DNAME redirection may be returned
+ * instead. This is with type = NULL only and is not checked in
+ * other circumstances. If the DNAME has multiple RRs, it throws
+ * DataSourceError.
+ * \param want_ns This allows redirection by NS to be returned. If
+ * any other data is met as well, DataSourceError is thrown.
+ * \note It may happen that some of the above error conditions are not
+ * detected in some circumstances. The goal here is not to validate
+ * the domain in DB, but to avoid bad behaviour resulting from
+ * broken data.
+ * \return First part of the result tells if the domain contains any
+ * RRs. This can be used to decide between NXDOMAIN and NXRRSET.
+ * The second part is the RRset found (if any) with any relevant
+ * signatures attached to it.
+ * \todo This interface doesn't look very elegant. Any better idea
+ * would be nice.
+ */
+ std::pair<bool, isc::dns::RRsetPtr> getRRset(const isc::dns::Name&
+ name,
+ const isc::dns::RRType*
+ type,
+ bool want_cname,
+ bool want_dname,
+ bool want_ns);
+ };
+ /**
+ * \brief Find a zone in the database
+ *
+     * This queries the database's getZone() to find the best matching zone.
+ * It will propagate whatever exceptions are thrown from that method
+ * (which is not restricted in any way).
+ *
+ * \param name Name of the zone or data contained there.
+ * \return FindResult containing the code and an instance of Finder, if
+     *     anything is found. However, applications should not rely on the
+     *     ZoneFinder being an instance of Finder (a subclass of this class
+     *     may return something else, and this may change in future versions);
+     *     it should be used as a ZoneFinder only.
+ */
+ virtual FindResult findZone(const isc::dns::Name& name) const;
+
+ /**
+ * \brief Get the zone iterator
+ *
+ * The iterator allows going through the whole zone content. If the
+     * underlying DatabaseAccessor is implemented correctly, it should
+ * be possible to have multiple ZoneIterators at once and query data
+ * at the same time.
+ *
+ * \exception DataSourceError if the zone doesn't exist.
+     * \exception isc::NotImplemented if the underlying DatabaseAccessor
+ * doesn't implement iteration. But in case it is not implemented
+ * and the zone doesn't exist, DataSourceError is thrown.
+     * \exception Anything else the underlying DatabaseAccessor might
+ * want to throw.
+ * \param name The origin of the zone to iterate.
+ * \return Shared pointer to the iterator (it will never be NULL)
+ */
+ virtual ZoneIteratorPtr getIterator(const isc::dns::Name& name) const;
+
+private:
+ /// \brief Our database.
+ const boost::shared_ptr<DatabaseAccessor> database_;
+};
+
+}
+}
+
+#endif
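
To make the accessor contract above concrete, the following is a minimal sketch of a toy backend, not part of the patch, that serves a single hard-coded SOA record for one zone; a real backend (such as the sqlite3 accessor) would run database queries instead:

    #include <datasrc/database.h>
    #include <dns/name.h>
    #include <string>
    #include <utility>

    using namespace isc::datasrc;

    // Toy accessor: one zone "example.org" (id 1) with a single SOA record.
    class TrivialAccessor : public DatabaseAccessor {
    public:
        virtual std::pair<bool, int> getZone(const isc::dns::Name& name) const {
            return (std::pair<bool, int>(name == isc::dns::Name("example.org"), 1));
        }
        virtual IteratorContextPtr getRecords(const std::string&, int) const {
            return (IteratorContextPtr(new Context));
        }
        virtual IteratorContextPtr getAllRecords(int) const {
            return (IteratorContextPtr(new Context));
        }
        virtual const std::string& getDBName() const {
            static const std::string name("trivial");
            return (name);
        }
    private:
        // Hands out one row, then signals end of data.
        class Context : public IteratorContext {
        public:
            Context() : done_(false) {}
            virtual bool getNext(std::string (&columns)[COLUMN_COUNT]) {
                if (done_) {
                    return (false);
                }
                columns[TYPE_COLUMN] = "SOA";
                columns[TTL_COLUMN] = "3600";
                columns[SIGTYPE_COLUMN] = "";
                columns[RDATA_COLUMN] = "ns.example.org. admin.example.org. "
                    "1 3600 300 3600000 3600";
                columns[NAME_COLUMN] = "example.org.";
                done_ = true;
                return (true);
            }
        private:
            bool done_;
        };
    };

A DatabaseClient can then be built on top of it and used through the generic interface; for example, DatabaseClient(boost::shared_ptr<DatabaseAccessor>(new TrivialAccessor())).findZone(Name("www.example.org")) would produce a PARTIALMATCH result with a Finder whose origin is example.org.
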
diff --git a/src/lib/datasrc/datasrc_messages.mes b/src/lib/datasrc/datasrc_messages.mes
index 3dc69e0..659d2bd 100644
--- a/src/lib/datasrc/datasrc_messages.mes
+++ b/src/lib/datasrc/datasrc_messages.mes
@@ -63,6 +63,60 @@ The maximum allowed number of items of the hotspot cache is set to the given
number. If there are too many, some of them will be dropped. The size of 0
means no limit.
+% DATASRC_DATABASE_FIND_RECORDS looking in datasource %1 for record %2/%3
+Debug information. The database data source is looking up records with the given
+name and type in the database.
+
+% DATASRC_DATABASE_FIND_TTL_MISMATCH TTL values differ in %1 for elements of %2/%3/%4, setting to %5
+The datasource backend provided resource records for the given RRset with
+different TTL values. This isn't allowed on the wire and is considered
+an error, so we set it to the lowest value we found (but we don't modify the
+database). The data in the database should be checked and fixed.
+
+% DATASRC_DATABASE_FOUND_DELEGATION Found delegation at %2 in %1
+When searching for a domain, the program encountered a delegation to a different zone
+at the given domain name. It will return that one instead.
+
+% DATASRC_DATABASE_FOUND_DELEGATION_EXACT Found delegation at %2 (exact match) in %1
+The program found the domain requested, but it is a delegation point to a
+different zone, therefore it is not authoritative for this domain name.
+It will return the NS record instead.
+
+% DATASRC_DATABASE_FOUND_DNAME Found DNAME at %2 in %1
+When searching for a domain, the program encountered a DNAME redirection to a different
+place in the domain space at the given domain name. It will return that one
+instead.
+
+% DATASRC_DATABASE_FOUND_NXDOMAIN search in datasource %1 resulted in NXDOMAIN for %2/%3/%4
+The data returned by the database backend did not contain any data for the given
+domain name, class and type.
+
+% DATASRC_DATABASE_FOUND_NXRRSET search in datasource %1 resulted in NXRRSET for %2/%3/%4
+The data returned by the database backend contained data for the given domain
+name and class, but not for the given type.
+
+% DATASRC_DATABASE_FOUND_RRSET search in datasource %1 resulted in RRset %2
+The data returned by the database backend contained data for the given domain
+name, and it either matches the type or has a relevant type. The RRset that is
+returned is printed.
+
+% DATASRC_DATABASE_ITERATE iterating zone %1
+The program is reading the whole zone, i.e. not searching for specific data, but going
+through each of the RRsets there.
+
+% DATASRC_DATABASE_ITERATE_END iterating zone finished
+While iterating through the zone, the program reached end of the data.
+
+% DATASRC_DATABASE_ITERATE_NEXT next RRset in zone is %1/%2
+While iterating through the zone, the program extracted the next RRset from it.
+The name and RRtype of the RRset are indicated in the message.
+
+% DATASRC_DATABASE_ITERATE_TTL_MISMATCH TTL values differ for RRs of %1/%2/%3, setting to %4
+While iterating through the zone, the TTL values of RRs in the given RRset
+were found to differ. This isn't allowed on the wire and is considered
+an error, so we set it to the lowest value we found (but we don't modify the
+database). The data in the database should be checked and fixed.
+
% DATASRC_DO_QUERY handling query for '%1/%2'
A debug message indicating that a query for the given name and RR type is being
processed.
@@ -400,12 +454,22 @@ enough information for it. The code is 1 for error, 2 for not implemented.
% DATASRC_SQLITE_CLOSE closing SQLite database
Debug information. The SQLite data source is closing the database file.
+
+% DATASRC_SQLITE_CONNOPEN Opening sqlite database file '%1'
+The database file is being opened so it can start providing data.
+
+% DATASRC_SQLITE_CONNCLOSE Closing sqlite database
+The database file is no longer needed and is being closed.
+
% DATASRC_SQLITE_CREATE SQLite data source created
Debug information. An instance of SQLite data source is being created.
% DATASRC_SQLITE_DESTROY SQLite data source destroyed
Debug information. An instance of SQLite data source is being destroyed.
+% DATASRC_SQLITE_DROPCONN SQLite3Database is being deinitialized
+The object around a database connection is being destroyed.
+
% DATASRC_SQLITE_ENCLOSURE looking for zone containing '%1'
Debug information. The SQLite data source is trying to identify which zone
should hold this domain.
@@ -458,6 +522,9 @@ source.
The SQLite data source was asked to provide a NSEC3 record for given zone.
But it doesn't contain that zone.
+% DATASRC_SQLITE_NEWCONN SQLite3Database is being initialized
+A wrapper object to hold database connection is being initialized.
+
% DATASRC_SQLITE_OPEN opening SQLite database '%1'
Debug information. The SQLite data source is loading an SQLite database in
the provided file.
@@ -496,4 +563,3 @@ data source.
% DATASRC_UNEXPECTED_QUERY_STATE unexpected query state
This indicates a programming error. An internal task of unknown type was
generated.
-
diff --git a/src/lib/datasrc/iterator.h b/src/lib/datasrc/iterator.h
new file mode 100644
index 0000000..0102fcb
--- /dev/null
+++ b/src/lib/datasrc/iterator.h
@@ -0,0 +1,61 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <dns/rrset.h>
+
+#include <boost/noncopyable.hpp>
+
+namespace isc {
+namespace datasrc {
+
+/**
+ * \brief Read-only iterator to a zone.
+ *
+ * You can get an instance of (a descendant of) ZoneIterator from the
+ * DataSourceClient::getIterator() method. The actual concrete implementation
+ * will be different depending on the actual data source used. This is the
+ * abstract interface.
+ *
+ * There's no way to start iterating from the beginning again or to go back.
+ */
+class ZoneIterator : public boost::noncopyable {
+public:
+ /**
+ * \brief Destructor
+ *
+ * Virtual destructor. It is empty, but ensures the right destructor from
+     * the descendant class is called.
+ */
+    virtual ~ZoneIterator() { }
+
+ /**
+ * \brief Get next RRset from the zone.
+ *
+ * This returns the next RRset in the zone as a shared pointer. The
+ * shared pointer is used to allow both accessing in-memory data and
+ * automatic memory management.
+ *
+     * No particular order is guaranteed.
+ *
+ * While this can potentially throw anything (including standard allocation
+ * errors), it should be rare.
+ *
+ * \return Pointer to the next RRset or NULL pointer when the iteration
+ * gets to the end of the zone.
+ */
+ virtual isc::dns::ConstRRsetPtr getNextRRset() = 0;
+};
+
+}
+}
diff --git a/src/lib/datasrc/memory_datasrc.cc b/src/lib/datasrc/memory_datasrc.cc
index 3d24ce0..1fc9252 100644
--- a/src/lib/datasrc/memory_datasrc.cc
+++ b/src/lib/datasrc/memory_datasrc.cc
@@ -25,6 +25,8 @@
#include <datasrc/memory_datasrc.h>
#include <datasrc/rbtree.h>
#include <datasrc/logger.h>
+#include <datasrc/iterator.h>
+#include <datasrc/data_source.h>
using namespace std;
using namespace isc::dns;
@@ -32,6 +34,27 @@ using namespace isc::dns;
namespace isc {
namespace datasrc {
+namespace {
+// Some type aliases
+/*
+ * Each domain consists of some RRsets. They will be looked up by the
+ * RRType.
+ *
+ * The use of map is questionable with regard to performance - there will
+ * usually be only a few RRsets in the domain, so the log n benefit isn't
+ * large, and a vector/array might be faster due to its simplicity and
+ * contiguous memory layout. But this is unlikely to be a performance
+ * critical place and map has a better interface for the lookups, so we use
+ * that.
+ */
+typedef map<RRType, ConstRRsetPtr> Domain;
+typedef Domain::value_type DomainPair;
+typedef boost::shared_ptr<Domain> DomainPtr;
+// The tree stores domains
+typedef RBTree<Domain> DomainTree;
+typedef RBNode<Domain> DomainNode;
+}
+
// Private data and hidden methods of InMemoryZoneFinder
struct InMemoryZoneFinder::InMemoryZoneFinderImpl {
// Constructor
@@ -44,25 +67,6 @@ struct InMemoryZoneFinder::InMemoryZoneFinderImpl {
DomainPtr origin_domain(new Domain);
origin_data_->setData(origin_domain);
}
-
- // Some type aliases
- /*
- * Each domain consists of some RRsets. They will be looked up by the
- * RRType.
- *
- * The use of map is questionable with regard to performance - there'll
- * be usually only few RRsets in the domain, so the log n benefit isn't
- * much and a vector/array might be faster due to its simplicity and
- * continuous memory location. But this is unlikely to be a performance
- * critical place and map has better interface for the lookups, so we use
- * that.
- */
- typedef map<RRType, ConstRRsetPtr> Domain;
- typedef Domain::value_type DomainPair;
- typedef boost::shared_ptr<Domain> DomainPtr;
- // The tree stores domains
- typedef RBTree<Domain> DomainTree;
- typedef RBNode<Domain> DomainNode;
static const DomainNode::Flags DOMAINFLAG_WILD = DomainNode::FLAG_USER1;
// Information about the zone
@@ -606,19 +610,19 @@ InMemoryZoneFinder::~InMemoryZoneFinder() {
delete impl_;
}
-const Name&
+Name
InMemoryZoneFinder::getOrigin() const {
return (impl_->origin_);
}
-const RRClass&
+RRClass
InMemoryZoneFinder::getClass() const {
return (impl_->zone_class_);
}
ZoneFinder::FindResult
InMemoryZoneFinder::find(const Name& name, const RRType& type,
- RRsetList* target, const FindOptions options) const
+ RRsetList* target, const FindOptions options)
{
return (impl_->find(name, type, target, options));
}
@@ -634,7 +638,7 @@ InMemoryZoneFinder::load(const string& filename) {
LOG_DEBUG(logger, DBG_TRACE_BASIC, DATASRC_MEM_LOAD).arg(getOrigin()).
arg(filename);
// Load it into a temporary tree
- InMemoryZoneFinderImpl::DomainTree tmp;
+ DomainTree tmp;
masterLoad(filename.c_str(), getOrigin(), getClass(),
boost::bind(&InMemoryZoneFinderImpl::addFromLoad, impl_, _1, &tmp));
// If it went well, put it inside
@@ -700,8 +704,94 @@ InMemoryClient::addZone(ZoneFinderPtr zone_finder) {
InMemoryClient::FindResult
InMemoryClient::findZone(const isc::dns::Name& name) const {
LOG_DEBUG(logger, DBG_TRACE_DATA, DATASRC_MEM_FIND_ZONE).arg(name);
- return (FindResult(impl_->zone_table.findZone(name).code,
- impl_->zone_table.findZone(name).zone));
+ ZoneTable::FindResult result(impl_->zone_table.findZone(name));
+ return (FindResult(result.code, result.zone));
+}
+
+namespace {
+
+class MemoryIterator : public ZoneIterator {
+private:
+ RBTreeNodeChain<Domain> chain_;
+ Domain::const_iterator dom_iterator_;
+ const DomainTree& tree_;
+ const DomainNode* node_;
+ bool ready_;
+public:
+ MemoryIterator(const DomainTree& tree, const Name& origin) :
+ tree_(tree),
+ ready_(true)
+ {
+ // Find the first node (origin) and preserve the node chain for future
+ // searches
+ DomainTree::Result result(tree_.find<void*>(origin, &node_, chain_,
+ NULL, NULL));
+ // It can't happen that the origin is not in there
+ if (result != DomainTree::EXACTMATCH) {
+ isc_throw(Unexpected,
+ "In-memory zone corrupted, missing origin node");
+ }
+ // Initialize the iterator if there's somewhere to point to
+ if (node_ != NULL && node_->getData() != DomainPtr()) {
+ dom_iterator_ = node_->getData()->begin();
+ }
+ }
+
+ virtual ConstRRsetPtr getNextRRset() {
+ if (!ready_) {
+ isc_throw(Unexpected, "Iterating past the zone end");
+ }
+ /*
+ * This loop finds the first node that still has an unused RRset.
+ * If node_ is NULL, we have run out of nodes. If the node's data is
+ * empty, it contains no RRsets. If we are at the end of the current
+ * node's data, just move on to the next node.
+ */
+ while (node_ != NULL && (node_->getData() == DomainPtr() ||
+ dom_iterator_ == node_->getData()->end())) {
+ node_ = tree_.nextNode(chain_);
+ // If there's a node, initialize the iterator and check next time
+ // if the map is empty or not
+ if (node_ != NULL && node_->getData() != NULL) {
+ dom_iterator_ = node_->getData()->begin();
+ }
+ }
+ if (node_ == NULL) {
+ // That's all, folks
+ ready_ = false;
+ return (ConstRRsetPtr());
+ }
+ // The iterator points to the next yet unused RRset now
+ ConstRRsetPtr result(dom_iterator_->second);
+ // This one is consumed; advance the iterator for the next call
+ ++dom_iterator_;
+
+ return (result);
+ }
+};
+
+} // End of anonymous namespace
+
+ZoneIteratorPtr
+InMemoryClient::getIterator(const Name& name) const {
+ ZoneTable::FindResult result(impl_->zone_table.findZone(name));
+ if (result.code != result::SUCCESS) {
+ isc_throw(DataSourceError, "No such zone: " + name.toText());
+ }
+
+ const InMemoryZoneFinder*
+ zone(dynamic_cast<const InMemoryZoneFinder*>(result.zone.get()));
+ if (zone == NULL) {
+ /*
+ * TODO: This can happen only during some of the tests and only as
+ * a temporary solution. This should be fixed by #1159 and then
+ * this cast and check shouldn't be necessary. We don't have a
+ * test for handling a "cannot happen" condition.
+ */
+ isc_throw(Unexpected, "The zone at " + name.toText() +
+ " is not InMemoryZoneFinder");
+ }
+ return (ZoneIteratorPtr(new MemoryIterator(zone->impl_->domains_, name)));
}
+
} // end of namespace datasrc
} // end of namespace dns
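As an aside on the Domain typedef introduced above, this is the kind of lookup the map interface buys. A standalone sketch; findRRsetInDomain() is purely illustrative and not part of the datasource code:

#include <dns/rrset.h>
#include <dns/rrtype.h>
#include <map>

// Illustrative only: one zone node, keyed by RRType as in the Domain typedef.
typedef std::map<isc::dns::RRType, isc::dns::ConstRRsetPtr> Domain;

// Return the RRset of the requested type, or a NULL pointer if the node
// doesn't contain that type.
isc::dns::ConstRRsetPtr
findRRsetInDomain(const Domain& domain, const isc::dns::RRType& type) {
    const Domain::const_iterator found(domain.find(type));
    return (found != domain.end() ? found->second :
            isc::dns::ConstRRsetPtr());
}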
diff --git a/src/lib/datasrc/memory_datasrc.h b/src/lib/datasrc/memory_datasrc.h
index 9bed960..6cd1753 100644
--- a/src/lib/datasrc/memory_datasrc.h
+++ b/src/lib/datasrc/memory_datasrc.h
@@ -58,10 +58,10 @@ public:
//@}
/// \brief Returns the origin of the zone.
- virtual const isc::dns::Name& getOrigin() const;
+ virtual isc::dns::Name getOrigin() const;
/// \brief Returns the class of the zone.
- virtual const isc::dns::RRClass& getClass() const;
+ virtual isc::dns::RRClass getClass() const;
/// \brief Looks up an RRset in the zone.
///
@@ -73,7 +73,7 @@ public:
virtual FindResult find(const isc::dns::Name& name,
const isc::dns::RRType& type,
isc::dns::RRsetList* target = NULL,
- const FindOptions options = FIND_DEFAULT) const;
+ const FindOptions options = FIND_DEFAULT);
/// \brief Inserts an rrset into the zone.
///
@@ -182,6 +182,11 @@ private:
struct InMemoryZoneFinderImpl;
InMemoryZoneFinderImpl* impl_;
//@}
+ // The friend here is for InMemoryClient::getIterator. The iterator
+ // needs to access the data inside the zone, so the InMemoryClient
+ // extracts the pointer to data and puts it into the iterator.
+ // The access is read only.
+ friend class InMemoryClient;
};
/// \brief A data source client that holds all necessary data in memory.
@@ -258,6 +263,9 @@ public:
/// For other details see \c DataSourceClient::findZone().
virtual FindResult findZone(const isc::dns::Name& name) const;
+ /// \brief Implementation of the getIterator method
+ virtual ZoneIteratorPtr getIterator(const isc::dns::Name& name) const;
+
private:
// TODO: Do we still need the PImpl if nobody should manipulate this class
// directly any more (it should be handled through DataSourceClient)?
diff --git a/src/lib/datasrc/sqlite3_accessor.cc b/src/lib/datasrc/sqlite3_accessor.cc
new file mode 100644
index 0000000..e604cf9
--- /dev/null
+++ b/src/lib/datasrc/sqlite3_accessor.cc
@@ -0,0 +1,472 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <sqlite3.h>
+
+#include <datasrc/sqlite3_accessor.h>
+#include <datasrc/logger.h>
+#include <datasrc/data_source.h>
+#include <util/filename.h>
+
+#include <boost/lexical_cast.hpp>
+
+namespace isc {
+namespace datasrc {
+
+struct SQLite3Parameters {
+ SQLite3Parameters() :
+ db_(NULL), version_(-1),
+ q_zone_(NULL)
+ /*q_record_(NULL), q_addrs_(NULL), q_referral_(NULL),
+ q_count_(NULL), q_previous_(NULL), q_nsec3_(NULL),
+ q_prevnsec3_(NULL) */
+ {}
+ sqlite3* db_;
+ int version_;
+ sqlite3_stmt* q_zone_;
+ /*
+ TODO: Yet unneeded statements
+ sqlite3_stmt* q_record_;
+ sqlite3_stmt* q_addrs_;
+ sqlite3_stmt* q_referral_;
+ sqlite3_stmt* q_count_;
+ sqlite3_stmt* q_previous_;
+ sqlite3_stmt* q_nsec3_;
+ sqlite3_stmt* q_prevnsec3_;
+ */
+};
+
+SQLite3Database::SQLite3Database(const std::string& filename,
+ const isc::dns::RRClass& rrclass) :
+ dbparameters_(new SQLite3Parameters),
+ class_(rrclass.toText()),
+ database_name_("sqlite3_" +
+ isc::util::Filename(filename).nameAndExtension())
+{
+ LOG_DEBUG(logger, DBG_TRACE_BASIC, DATASRC_SQLITE_NEWCONN);
+
+ open(filename);
+}
+
+namespace {
+
+// This is a helper class to initialize an SQLite3 DB safely. An object of
+// this class encapsulates all temporary resources that are necessary for
+// the initialization, and releases them in the destructor. Once everything
+// is properly initialized, the move() method moves the allocated resources
+// to the main object in an exception-free manner. This way, the main code
+// for the initialization can be exception safe and can provide the strong
+// exception guarantee.
+class Initializer {
+public:
+ ~Initializer() {
+ if (params_.q_zone_ != NULL) {
+ sqlite3_finalize(params_.q_zone_);
+ }
+ /*
+ if (params_.q_record_ != NULL) {
+ sqlite3_finalize(params_.q_record_);
+ }
+ if (params_.q_addrs_ != NULL) {
+ sqlite3_finalize(params_.q_addrs_);
+ }
+ if (params_.q_referral_ != NULL) {
+ sqlite3_finalize(params_.q_referral_);
+ }
+ if (params_.q_count_ != NULL) {
+ sqlite3_finalize(params_.q_count_);
+ }
+ if (params_.q_previous_ != NULL) {
+ sqlite3_finalize(params_.q_previous_);
+ }
+ if (params_.q_nsec3_ != NULL) {
+ sqlite3_finalize(params_.q_nsec3_);
+ }
+ if (params_.q_prevnsec3_ != NULL) {
+ sqlite3_finalize(params_.q_prevnsec3_);
+ }
+ */
+ if (params_.db_ != NULL) {
+ sqlite3_close(params_.db_);
+ }
+ }
+ void move(SQLite3Parameters* dst) {
+ *dst = params_;
+ params_ = SQLite3Parameters(); // clear everything
+ }
+ SQLite3Parameters params_;
+};
+
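The Initializer above is one instance of a common guard-and-release idiom. A generic, hypothetical sketch of the same pattern with a plain FILE handle; nothing here is part of the datasource code:

#include <cstdio>

// Illustrative only: owns a FILE* until ownership is explicitly released.
struct FileGuard {
    FILE* fp;
    FileGuard() : fp(NULL) {}
    ~FileGuard() {
        if (fp != NULL) {
            fclose(fp);
        }
    }
    // Hand the resource over without any operation that could throw.
    FILE* release() {
        FILE* result = fp;
        fp = NULL;
        return (result);
    }
};

FILE*
openAndPrepare(const char* path) {
    FileGuard guard;
    guard.fp = fopen(path, "r");
    if (guard.fp == NULL) {
        return (NULL);
    }
    // ... further setup that may throw; on unwind the guard closes fp ...
    return (guard.release()); // success: the caller now owns the handle
}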
+const char* const SCHEMA_LIST[] = {
+ "CREATE TABLE schema_version (version INTEGER NOT NULL)",
+ "INSERT INTO schema_version VALUES (1)",
+ "CREATE TABLE zones (id INTEGER PRIMARY KEY, "
+ "name STRING NOT NULL COLLATE NOCASE, "
+ "rdclass STRING NOT NULL COLLATE NOCASE DEFAULT 'IN', "
+ "dnssec BOOLEAN NOT NULL DEFAULT 0)",
+ "CREATE INDEX zones_byname ON zones (name)",
+ "CREATE TABLE records (id INTEGER PRIMARY KEY, "
+ "zone_id INTEGER NOT NULL, name STRING NOT NULL COLLATE NOCASE, "
+ "rname STRING NOT NULL COLLATE NOCASE, ttl INTEGER NOT NULL, "
+ "rdtype STRING NOT NULL COLLATE NOCASE, sigtype STRING COLLATE NOCASE, "
+ "rdata STRING NOT NULL)",
+ "CREATE INDEX records_byname ON records (name)",
+ "CREATE INDEX records_byrname ON records (rname)",
+ "CREATE TABLE nsec3 (id INTEGER PRIMARY KEY, zone_id INTEGER NOT NULL, "
+ "hash STRING NOT NULL COLLATE NOCASE, "
+ "owner STRING NOT NULL COLLATE NOCASE, "
+ "ttl INTEGER NOT NULL, rdtype STRING NOT NULL COLLATE NOCASE, "
+ "rdata STRING NOT NULL)",
+ "CREATE INDEX nsec3_byhash ON nsec3 (hash)",
+ NULL
+};
+
+const char* const q_zone_str = "SELECT id FROM zones WHERE name=?1 AND rdclass = ?2";
+
+// note that the order of the SELECT values is specifically chosen to match
+// the enum values in RecordColumns
+const char* const q_any_str = "SELECT rdtype, ttl, sigtype, rdata "
+ "FROM records WHERE zone_id=?1 AND name=?2";
+
+// note that the order of the SELECT values is specifically chosen to match
+// the enum values in RecordColumns
+const char* const q_iterate_str = "SELECT rdtype, ttl, sigtype, rdata, name FROM records "
+ "WHERE zone_id = ?1 "
+ "ORDER BY name, rdtype";
+
+/* TODO: Prune the statements; maybe not everything will be needed.
+const char* const q_record_str = "SELECT rdtype, ttl, sigtype, rdata "
+ "FROM records WHERE zone_id=?1 AND name=?2 AND "
+ "((rdtype=?3 OR sigtype=?3) OR "
+ "(rdtype='CNAME' OR sigtype='CNAME') OR "
+ "(rdtype='NS' OR sigtype='NS'))";
+
+const char* const q_addrs_str = "SELECT rdtype, ttl, sigtype, rdata "
+ "FROM records WHERE zone_id=?1 AND name=?2 AND "
+ "(rdtype='A' OR sigtype='A' OR rdtype='AAAA' OR sigtype='AAAA')";
+
+const char* const q_referral_str = "SELECT rdtype, ttl, sigtype, rdata FROM "
+ "records WHERE zone_id=?1 AND name=?2 AND"
+ "(rdtype='NS' OR sigtype='NS' OR rdtype='DS' OR sigtype='DS' OR "
+ "rdtype='DNAME' OR sigtype='DNAME')";
+
+const char* const q_count_str = "SELECT COUNT(*) FROM records "
+ "WHERE zone_id=?1 AND rname LIKE (?2 || '%');";
+
+const char* const q_previous_str = "SELECT name FROM records "
+ "WHERE zone_id=?1 AND rdtype = 'NSEC' AND "
+ "rname < $2 ORDER BY rname DESC LIMIT 1";
+
+const char* const q_nsec3_str = "SELECT rdtype, ttl, rdata FROM nsec3 "
+ "WHERE zone_id = ?1 AND hash = $2";
+
+const char* const q_prevnsec3_str = "SELECT hash FROM nsec3 "
+ "WHERE zone_id = ?1 AND hash <= $2 ORDER BY hash DESC LIMIT 1";
+ */
+
+sqlite3_stmt*
+prepare(sqlite3* const db, const char* const statement) {
+ sqlite3_stmt* prepared = NULL;
+ if (sqlite3_prepare_v2(db, statement, -1, &prepared, NULL) != SQLITE_OK) {
+ isc_throw(SQLite3Error, "Could not prepare SQLite statement: " <<
+ statement);
+ }
+ return (prepared);
+}
+
+void
+checkAndSetupSchema(Initializer* initializer) {
+ sqlite3* const db = initializer->params_.db_;
+
+ sqlite3_stmt* prepared = NULL;
+ if (sqlite3_prepare_v2(db, "SELECT version FROM schema_version", -1,
+ &prepared, NULL) == SQLITE_OK &&
+ sqlite3_step(prepared) == SQLITE_ROW) {
+ initializer->params_.version_ = sqlite3_column_int(prepared, 0);
+ sqlite3_finalize(prepared);
+ } else {
+ logger.info(DATASRC_SQLITE_SETUP);
+ if (prepared != NULL) {
+ sqlite3_finalize(prepared);
+ }
+ for (int i = 0; SCHEMA_LIST[i] != NULL; ++i) {
+ if (sqlite3_exec(db, SCHEMA_LIST[i], NULL, NULL, NULL) !=
+ SQLITE_OK) {
+ isc_throw(SQLite3Error,
+ "Failed to set up schema " << SCHEMA_LIST[i]);
+ }
+ }
+ }
+
+ initializer->params_.q_zone_ = prepare(db, q_zone_str);
+ /* TODO: Yet unneeded statements
+ initializer->params_.q_record_ = prepare(db, q_record_str);
+ initializer->params_.q_addrs_ = prepare(db, q_addrs_str);
+ initializer->params_.q_referral_ = prepare(db, q_referral_str);
+ initializer->params_.q_count_ = prepare(db, q_count_str);
+ initializer->params_.q_previous_ = prepare(db, q_previous_str);
+ initializer->params_.q_nsec3_ = prepare(db, q_nsec3_str);
+ initializer->params_.q_prevnsec3_ = prepare(db, q_prevnsec3_str);
+ */
+}
+
+}
+
+void
+SQLite3Database::open(const std::string& name) {
+ LOG_DEBUG(logger, DBG_TRACE_BASIC, DATASRC_SQLITE_CONNOPEN).arg(name);
+ if (dbparameters_->db_ != NULL) {
+ // There shouldn't be a way to trigger this anyway
+ isc_throw(DataSourceError, "Duplicate SQLite open with " << name);
+ }
+
+ Initializer initializer;
+
+ if (sqlite3_open(name.c_str(), &initializer.params_.db_) != 0) {
+ isc_throw(SQLite3Error, "Cannot open SQLite database file: " << name);
+ }
+
+ checkAndSetupSchema(&initializer);
+ initializer.move(dbparameters_.get());
+}
+
+SQLite3Database::~SQLite3Database() {
+ LOG_DEBUG(logger, DBG_TRACE_BASIC, DATASRC_SQLITE_DROPCONN);
+ if (dbparameters_->db_ != NULL) {
+ close();
+ }
+}
+
+void
+SQLite3Database::close(void) {
+ LOG_DEBUG(logger, DBG_TRACE_BASIC, DATASRC_SQLITE_CONNCLOSE);
+ if (dbparameters_->db_ == NULL) {
+ isc_throw(DataSourceError,
+ "SQLite data source is being closed before open");
+ }
+
+ // XXX: sqlite3_finalize() could fail. What should we do in that case?
+ sqlite3_finalize(dbparameters_->q_zone_);
+ dbparameters_->q_zone_ = NULL;
+
+ /* TODO: Once they are needed or not, uncomment or drop
+ sqlite3_finalize(dbparameters->q_record_);
+ dbparameters->q_record_ = NULL;
+
+ sqlite3_finalize(dbparameters->q_addrs_);
+ dbparameters->q_addrs_ = NULL;
+
+ sqlite3_finalize(dbparameters->q_referral_);
+ dbparameters->q_referral_ = NULL;
+
+ sqlite3_finalize(dbparameters->q_count_);
+ dbparameters->q_count_ = NULL;
+
+ sqlite3_finalize(dbparameters->q_previous_);
+ dbparameters->q_previous_ = NULL;
+
+ sqlite3_finalize(dbparameters->q_prevnsec3_);
+ dbparameters->q_prevnsec3_ = NULL;
+
+ sqlite3_finalize(dbparameters->q_nsec3_);
+ dbparameters->q_nsec3_ = NULL;
+ */
+
+ sqlite3_close(dbparameters_->db_);
+ dbparameters_->db_ = NULL;
+}
+
+std::pair<bool, int>
+SQLite3Database::getZone(const isc::dns::Name& name) const {
+ int rc;
+
+ // Take the prepared statement (a simple SELECT id FROM zones WHERE...)
+ // and bind the parameters to it
+ sqlite3_reset(dbparameters_->q_zone_);
+ rc = sqlite3_bind_text(dbparameters_->q_zone_, 1, name.toText().c_str(),
+ -1, SQLITE_TRANSIENT);
+ if (rc != SQLITE_OK) {
+ isc_throw(SQLite3Error, "Could not bind " << name <<
+ " to SQL statement (zone)");
+ }
+ rc = sqlite3_bind_text(dbparameters_->q_zone_, 2, class_.c_str(), -1,
+ SQLITE_STATIC);
+ if (rc != SQLITE_OK) {
+ isc_throw(SQLite3Error, "Could not bind " << class_ <<
+ " to SQL statement (zone)");
+ }
+
+ // Get the data there and see if it found anything
+ rc = sqlite3_step(dbparameters_->q_zone_);
+ std::pair<bool, int> result;
+ if (rc == SQLITE_ROW) {
+ result = std::pair<bool, int>(true,
+ sqlite3_column_int(dbparameters_->
+ q_zone_, 0));
+ return (result);
+ } else if (rc == SQLITE_DONE) {
+ result = std::pair<bool, int>(false, 0);
+ // Free resources
+ sqlite3_reset(dbparameters_->q_zone_);
+ return (result);
+ }
+
+ isc_throw(DataSourceError, "Unexpected failure in sqlite3_step: " <<
+ sqlite3_errmsg(dbparameters_->db_));
+ // Compilers might not realize isc_throw always throws
+ return (std::pair<bool, int>(false, 0));
+}
+
+
+class SQLite3Database::Context : public DatabaseAccessor::IteratorContext {
+public:
+ // Construct an iterator for all records. When constructed this
+ // way, the getNext() call will copy all fields
+ Context(const boost::shared_ptr<const SQLite3Database>& database, int id) :
+ iterator_type_(ITT_ALL),
+ database_(database),
+ statement_(NULL),
+ name_("")
+ {
+ // We create the statement now and then just keep getting data from it
+ statement_ = prepare(database->dbparameters_->db_, q_iterate_str);
+ bindZoneId(id);
+ }
+
+ // Construct an iterator for records with a specific name. When constructed
+ // this way, the getNext() call will copy all fields except name
+ Context(const boost::shared_ptr<const SQLite3Database>& database, int id,
+ const std::string& name) :
+ iterator_type_(ITT_NAME),
+ database_(database),
+ statement_(NULL),
+ name_(name)
+ {
+ // We create the statement now and then just keep getting data from it
+ statement_ = prepare(database->dbparameters_->db_, q_any_str);
+ bindZoneId(id);
+ bindName(name_);
+ }
+
+ bool getNext(std::string (&data)[COLUMN_COUNT]) {
+ // If there's another row, get it
+ // If finalize has been called (e.g. when previous getNext() got
+ // SQLITE_DONE), directly return false
+ if (statement_ == NULL) {
+ return false;
+ }
+ const int rc(sqlite3_step(statement_));
+ if (rc == SQLITE_ROW) {
+ // For both types, we copy the first four columns
+ copyColumn(data, TYPE_COLUMN);
+ copyColumn(data, TTL_COLUMN);
+ copyColumn(data, SIGTYPE_COLUMN);
+ copyColumn(data, RDATA_COLUMN);
+ // Only copy Name if we are iterating over every record
+ if (iterator_type_ == ITT_ALL) {
+ copyColumn(data, NAME_COLUMN);
+ }
+ return (true);
+ } else if (rc != SQLITE_DONE) {
+ isc_throw(DataSourceError,
+ "Unexpected failure in sqlite3_step: " <<
+ sqlite3_errmsg(database_->dbparameters_->db_));
+ }
+ finalize();
+ return (false);
+ }
+
+ virtual ~Context() {
+ finalize();
+ }
+
+private:
+ // Depending on which constructor is called, behaviour is slightly
+ // different. We keep track of what to do with the iterator type
+ // See description of getNext() and the constructors
+ enum IteratorType {
+ ITT_ALL,
+ ITT_NAME
+ };
+
+ void copyColumn(std::string (&data)[COLUMN_COUNT], int column) {
+ data[column] = convertToPlainChar(sqlite3_column_text(statement_,
+ column));
+ }
+
+ void bindZoneId(const int zone_id) {
+ if (sqlite3_bind_int(statement_, 1, zone_id) != SQLITE_OK) {
+ finalize();
+ isc_throw(SQLite3Error, "Could not bind int " << zone_id <<
+ " to SQL statement: " <<
+ sqlite3_errmsg(database_->dbparameters_->db_));
+ }
+ }
+
+ void bindName(const std::string& name) {
+ if (sqlite3_bind_text(statement_, 2, name.c_str(), -1,
+ SQLITE_STATIC) != SQLITE_OK) {
+ const char* errmsg = sqlite3_errmsg(database_->dbparameters_->db_);
+ finalize();
+ isc_throw(SQLite3Error, "Could not bind text '" << name <<
+ "' to SQL statement: " << errmsg);
+ }
+ }
+
+ void finalize() {
+ sqlite3_finalize(statement_);
+ statement_ = NULL;
+ }
+
+ // This helper method converts from the unsigned char* type (used by
+ // sqlite3) to char* (wanted by std::string). Technically these types
+ // might not be directly convertible.
+ // In case sqlite3_column_text() returns NULL, we just make it an
+ // empty string, unless it was caused by a memory error
+ const char* convertToPlainChar(const unsigned char* ucp) {
+ if (ucp == NULL) {
+ // The field can really be NULL, in which case we return an
+ // empty string, or sqlite may have run out of memory, in
+ // which case we raise an error
+ if (sqlite3_errcode(database_->dbparameters_->db_)
+ == SQLITE_NOMEM) {
+ isc_throw(DataSourceError,
+ "Sqlite3 backend encountered a memory allocation "
+ "error in sqlite3_column_text()");
+ } else {
+ return ("");
+ }
+ }
+ const void* p = ucp;
+ return (static_cast<const char*>(p));
+ }
+
+ const IteratorType iterator_type_;
+ boost::shared_ptr<const SQLite3Database> database_;
+ sqlite3_stmt *statement_;
+ const std::string name_;
+};
+
+DatabaseAccessor::IteratorContextPtr
+SQLite3Database::getRecords(const std::string& name, int id) const {
+ return (IteratorContextPtr(new Context(shared_from_this(), id, name)));
+}
+
+DatabaseAccessor::IteratorContextPtr
+SQLite3Database::getAllRecords(int id) const {
+ return (IteratorContextPtr(new Context(shared_from_this(), id)));
+}
+
+}
+}
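All of the statement handling above follows the stock sqlite3 prepare/bind/step/finalize lifecycle. As a minimal standalone reminder of that pattern (the query mirrors the records schema above; error handling is reduced to a boolean and the helper itself is illustrative):

#include <sqlite3.h>
#include <string>
#include <vector>

// Fetch the name column of every record in one zone. Returns false on any
// sqlite3 error; the prepared statement is always finalized.
bool
fetchNames(sqlite3* db, int zone_id, std::vector<std::string>* names) {
    sqlite3_stmt* stmt = NULL;
    if (sqlite3_prepare_v2(db, "SELECT name FROM records WHERE zone_id = ?1",
                           -1, &stmt, NULL) != SQLITE_OK) {
        return (false);
    }
    bool ok = true;
    if (sqlite3_bind_int(stmt, 1, zone_id) != SQLITE_OK) {
        ok = false;
    } else {
        int rc;
        while ((rc = sqlite3_step(stmt)) == SQLITE_ROW) {
            const unsigned char* text = sqlite3_column_text(stmt, 0);
            names->push_back(text != NULL ?
                             reinterpret_cast<const char*>(text) : "");
        }
        // SQLITE_DONE means the result set was exhausted normally.
        ok = (rc == SQLITE_DONE);
    }
    sqlite3_finalize(stmt);
    return (ok);
}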
diff --git a/src/lib/datasrc/sqlite3_accessor.h b/src/lib/datasrc/sqlite3_accessor.h
new file mode 100644
index 0000000..50b15e7
--- /dev/null
+++ b/src/lib/datasrc/sqlite3_accessor.h
@@ -0,0 +1,147 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+
+#ifndef __DATASRC_SQLITE3_ACCESSOR_H
+#define __DATASRC_SQLITE3_ACCESSOR_H
+
+#include <datasrc/database.h>
+
+#include <exceptions/exceptions.h>
+
+#include <boost/enable_shared_from_this.hpp>
+#include <boost/scoped_ptr.hpp>
+#include <string>
+
+namespace isc {
+namespace dns {
+class RRClass;
+}
+
+namespace datasrc {
+
+/**
+ * \brief Low-level database error
+ *
+ * This exception is thrown when the SQLite library complains about something.
+ * It might mean a corrupt database file, an invalid request, or that
+ * something is rotten in the library.
+ */
+class SQLite3Error : public Exception {
+public:
+ SQLite3Error(const char* file, size_t line, const char* what) :
+ isc::Exception(file, line, what) {}
+};
+
+struct SQLite3Parameters;
+
+/**
+ * \brief Concrete implementation of DatabaseAccessor for SQLite3 databases
+ *
+ * This opens one database file with our schema and serves data from there.
+ * According to the design, it doesn't interpret the data in any way; it just
+ * provides unified access to the DB.
+ */
+class SQLite3Database : public DatabaseAccessor,
+ public boost::enable_shared_from_this<SQLite3Database> {
+public:
+ /**
+ * \brief Constructor
+ *
+ * This opens the database and becomes ready to serve data from there.
+ *
+ * \exception SQLite3Error will be thrown if the given database file
+ * doesn't work (it is broken, doesn't exist and can't be created, etc).
+ *
+ * \param filename The database file to be used.
+ * \param rrclass Which class of data it should serve (while the database
+ * file can contain multiple classes of data, a single accessor can
+ * provide only one class).
+ */
+ SQLite3Database(const std::string& filename,
+ const isc::dns::RRClass& rrclass);
+ /**
+ * \brief Destructor
+ *
+ * Closes the database.
+ */
+ ~SQLite3Database();
+
+ /**
+ * \brief Look up a zone
+ *
+ * This implements the getZone from DatabaseAccessor and looks up a zone
+ * in the data. It looks for a zone with the exact given origin and class
+ * passed to the constructor.
+ *
+ * \exception SQLite3Error if something about the database is broken.
+ *
+ * \param name The name of zone to look up
+ * \return A pair whose first element indicates whether the lookup was
+ * successful, and whose second element holds the zone id if it was.
+ */
+ virtual std::pair<bool, int> getZone(const isc::dns::Name& name) const;
+
+ /** \brief Look up all resource records for a name
+ *
+ * This implements the getRecords() method from DatabaseAccessor
+ *
+ * \exception SQLite3Error if there is an sqlite3 error when performing
+ * the query
+ *
+ * \param name the name to look up
+ * \param id the zone id, as returned by getZone()
+ * \return Iterator that contains all records with the given name
+ */
+ virtual IteratorContextPtr getRecords(const std::string& name,
+ int id) const;
+
+ /** \brief Look up all resource records for a zone
+ *
+ * This implements the getRecords() method from DatabaseAccessor
+ *
+ * \exception SQLite3Error if there is an sqlite3 error when performing
+ * the query
+ *
+ * \param id the zone id, as returned by getZone()
+ * \return Iterator that contains all records in the given zone
+ */
+ virtual IteratorContextPtr getAllRecords(int id) const;
+
+ /// The SQLite3 implementation of this method returns a string starting
+ /// with a fixed prefix of "sqlite3_" followed by the DB file name
+ /// removing any path name. For example, for the DB file
+ /// /somewhere/in/the/system/bind10.sqlite3, this method will return
+ /// "sqlite3_bind10.sqlite3".
+ virtual const std::string& getDBName() const { return (database_name_); }
+
+private:
+ /// \brief Private database data
+ boost::scoped_ptr<SQLite3Parameters> dbparameters_;
+ /// \brief The class for which the queries are done
+ const std::string class_;
+ /// \brief Opens the database
+ void open(const std::string& filename);
+ /// \brief Closes the database
+ void close();
+ /// \brief SQLite3 implementation of IteratorContext
+ class Context;
+ friend class Context;
+ const std::string database_name_;
+};
+
+}
+}
+
+#endif
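A minimal sketch of driving this accessor directly, based only on the interface above. In the real design the DatabaseClient layer does this rather than application code; the file name is a placeholder, and COLUMN_COUNT, NAME_COLUMN, TYPE_COLUMN and the getNext() call are assumed to come from the DatabaseAccessor base class in database.h, which is not shown in this diff:

#include <datasrc/sqlite3_accessor.h>
#include <dns/name.h>
#include <dns/rrclass.h>
#include <boost/shared_ptr.hpp>
#include <iostream>
#include <string>
#include <utility>

int main() {
    using namespace isc::datasrc;
    // A shared_ptr is needed because the iterator contexts keep the
    // accessor alive via shared_from_this().
    boost::shared_ptr<SQLite3Database> db(
        new SQLite3Database("zone.sqlite3",          // placeholder file name
                            isc::dns::RRClass::IN()));
    const std::pair<bool, int> zone(
        db->getZone(isc::dns::Name("example.org")));
    if (!zone.first) {
        std::cout << "zone not found" << std::endl;
        return (1);
    }
    DatabaseAccessor::IteratorContextPtr context(
        db->getAllRecords(zone.second));
    std::string columns[DatabaseAccessor::COLUMN_COUNT];
    while (context->getNext(columns)) {
        std::cout << columns[DatabaseAccessor::NAME_COLUMN] << " "
                  << columns[DatabaseAccessor::TYPE_COLUMN] << std::endl;
    }
    return (0);
}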
diff --git a/src/lib/datasrc/static_datasrc.cc b/src/lib/datasrc/static_datasrc.cc
index 65229a0..fd43e1c 100644
--- a/src/lib/datasrc/static_datasrc.cc
+++ b/src/lib/datasrc/static_datasrc.cc
@@ -70,6 +70,7 @@ StaticDataSrcImpl::StaticDataSrcImpl() :
authors = RRsetPtr(new RRset(authors_name, RRClass::CH(),
RRType::TXT(), RRTTL(0)));
authors->addRdata(generic::TXT("Chen Zhengzhang")); // Jerry
+ authors->addRdata(generic::TXT("Dmitriy Volodin"));
authors->addRdata(generic::TXT("Evan Hunt"));
authors->addRdata(generic::TXT("Haidong Wang")); // Ocean
authors->addRdata(generic::TXT("Han Feng"));
diff --git a/src/lib/datasrc/tests/Makefile.am b/src/lib/datasrc/tests/Makefile.am
index ffedb75..a913818 100644
--- a/src/lib/datasrc/tests/Makefile.am
+++ b/src/lib/datasrc/tests/Makefile.am
@@ -28,6 +28,9 @@ run_unittests_SOURCES += rbtree_unittest.cc
run_unittests_SOURCES += zonetable_unittest.cc
run_unittests_SOURCES += memory_datasrc_unittest.cc
run_unittests_SOURCES += logger_unittest.cc
+run_unittests_SOURCES += database_unittest.cc
+run_unittests_SOURCES += client_unittest.cc
+run_unittests_SOURCES += sqlite3_accessor_unittest.cc
run_unittests_CPPFLAGS = $(AM_CPPFLAGS) $(GTEST_INCLUDES)
run_unittests_LDFLAGS = $(AM_LDFLAGS) $(GTEST_LDFLAGS)
diff --git a/src/lib/datasrc/tests/cache_unittest.cc b/src/lib/datasrc/tests/cache_unittest.cc
index 96beae0..1325f64 100644
--- a/src/lib/datasrc/tests/cache_unittest.cc
+++ b/src/lib/datasrc/tests/cache_unittest.cc
@@ -202,15 +202,15 @@ TEST_F(CacheTest, retrieveFail) {
}
TEST_F(CacheTest, expire) {
- // Insert "foo" with a duration of 2 seconds; sleep 3. The
+ // Insert "foo" with a duration of 1 seconds; sleep 2. The
// record should not be returned from the cache even though it's
// at the top of the cache.
RRsetPtr aaaa(new RRset(Name("foo"), RRClass::IN(), RRType::AAAA(),
RRTTL(0)));
aaaa->addRdata(in::AAAA("2001:db8:3:bb::5"));
- cache.addPositive(aaaa, 0, 2);
+ cache.addPositive(aaaa, 0, 1);
- sleep(3);
+ sleep(2);
RRsetPtr r;
uint32_t f;
diff --git a/src/lib/datasrc/tests/client_unittest.cc b/src/lib/datasrc/tests/client_unittest.cc
new file mode 100644
index 0000000..1a88f18
--- /dev/null
+++ b/src/lib/datasrc/tests/client_unittest.cc
@@ -0,0 +1,47 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <datasrc/client.h>
+
+#include <dns/name.h>
+
+#include <gtest/gtest.h>
+
+using namespace isc::datasrc;
+using isc::dns::Name;
+
+namespace {
+
+/*
+ * The DataSourceClient can't be instantiated directly, as it has pure
+ * virtual methods. So we implement them as NOPs and test the other methods.
+ */
+class NopClient : public DataSourceClient {
+public:
+ virtual FindResult findZone(const isc::dns::Name&) const {
+ return (FindResult(result::NOTFOUND, ZoneFinderPtr()));
+ }
+};
+
+class ClientTest : public ::testing::Test {
+public:
+ NopClient client_;
+};
+
+// The default implementation is NotImplemented
+TEST_F(ClientTest, defaultIterator) {
+ EXPECT_THROW(client_.getIterator(Name(".")), isc::NotImplemented);
+}
+
+}
diff --git a/src/lib/datasrc/tests/database_unittest.cc b/src/lib/datasrc/tests/database_unittest.cc
new file mode 100644
index 0000000..8ff8c55
--- /dev/null
+++ b/src/lib/datasrc/tests/database_unittest.cc
@@ -0,0 +1,1115 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <gtest/gtest.h>
+
+#include <dns/name.h>
+#include <dns/rrttl.h>
+#include <dns/rrset.h>
+#include <exceptions/exceptions.h>
+
+#include <datasrc/database.h>
+#include <datasrc/zone.h>
+#include <datasrc/data_source.h>
+#include <datasrc/iterator.h>
+
+#include <testutils/dnsmessage_test.h>
+
+#include <map>
+
+using namespace isc::datasrc;
+using namespace std;
+using namespace boost;
+using namespace isc::dns;
+
+namespace {
+
+/*
+ * An accessor with a minimal implementation, keeping the original
+ * "NotImplemented" methods.
+ */
+class NopAccessor : public DatabaseAccessor {
+public:
+ NopAccessor() : database_name_("mock_database")
+ { }
+
+ virtual std::pair<bool, int> getZone(const Name& name) const {
+ if (name == Name("example.org")) {
+ return (std::pair<bool, int>(true, 42));
+ } else if (name == Name("null.example.org")) {
+ return (std::pair<bool, int>(true, 13));
+ } else if (name == Name("empty.example.org")) {
+ return (std::pair<bool, int>(true, 0));
+ } else if (name == Name("bad.example.org")) {
+ return (std::pair<bool, int>(true, -1));
+ } else {
+ return (std::pair<bool, int>(false, 0));
+ }
+ }
+
+ virtual const std::string& getDBName() const {
+ return (database_name_);
+ }
+
+ virtual IteratorContextPtr getRecords(const std::string&, int) const {
+ isc_throw(isc::NotImplemented,
+ "This database datasource can't be iterated");
+ };
+
+ virtual IteratorContextPtr getAllRecords(int) const {
+ isc_throw(isc::NotImplemented,
+ "This database datasource can't be iterated");
+ };
+private:
+ const std::string database_name_;
+
+};
+
+/*
+ * A virtual database connection that pretends it contains a single zone --
+ * example.org.
+ *
+ * It has the same getZone method as NopAccessor, but it provides an
+ * implementation of the optional functionality.
+ */
+class MockAccessor : public NopAccessor {
+public:
+ MockAccessor()
+ {
+ fillData();
+ }
+private:
+ class MockNameIteratorContext : public IteratorContext {
+ public:
+ MockNameIteratorContext(const MockAccessor& mock_accessor, int zone_id,
+ const std::string& name) :
+ searched_name_(name), cur_record_(0)
+ {
+ // 'Hardcoded' names that trigger exceptions:
+ // for these names, exceptions are thrown to test the robustness
+ // of the find() method.
+ if (searched_name_ == "dsexception.in.search.") {
+ isc_throw(DataSourceError, "datasource exception on search");
+ } else if (searched_name_ == "iscexception.in.search.") {
+ isc_throw(isc::Exception, "isc exception on search");
+ } else if (searched_name_ == "basicexception.in.search.") {
+ throw std::exception();
+ }
+
+ // we're not aiming for efficiency in this test, simply
+ // copy the relevant vector from records
+ if (zone_id == 42) {
+ if (mock_accessor.records.count(searched_name_) > 0) {
+ cur_name = mock_accessor.records.find(searched_name_)->second;
+ } else {
+ cur_name.clear();
+ }
+ } else {
+ cur_name.clear();
+ }
+ }
+
+ virtual bool getNext(std::string (&columns)[COLUMN_COUNT]) {
+ if (searched_name_ == "dsexception.in.getnext.") {
+ isc_throw(DataSourceError, "datasource exception on getnextrecord");
+ } else if (searched_name_ == "iscexception.in.getnext.") {
+ isc_throw(isc::Exception, "isc exception on getnextrecord");
+ } else if (searched_name_ == "basicexception.in.getnext.") {
+ throw std::exception();
+ }
+
+ if (cur_record_ < cur_name.size()) {
+ for (size_t i = 0; i < COLUMN_COUNT; ++i) {
+ columns[i] = cur_name[cur_record_][i];
+ }
+ cur_record_++;
+ return (true);
+ } else {
+ return (false);
+ }
+ }
+
+ private:
+ const std::string searched_name_;
+ int cur_record_;
+ std::vector< std::vector<std::string> > cur_name;
+ };
+
+ class MockIteratorContext : public IteratorContext {
+ private:
+ int step;
+ public:
+ MockIteratorContext() :
+ step(0)
+ { }
+ virtual bool getNext(string (&data)[COLUMN_COUNT]) {
+ switch (step ++) {
+ case 0:
+ data[DatabaseAccessor::NAME_COLUMN] = "example.org";
+ data[DatabaseAccessor::TYPE_COLUMN] = "SOA";
+ data[DatabaseAccessor::TTL_COLUMN] = "300";
+ data[DatabaseAccessor::RDATA_COLUMN] = "ns1.example.org. admin.example.org. "
+ "1234 3600 1800 2419200 7200";
+ return (true);
+ case 1:
+ data[DatabaseAccessor::NAME_COLUMN] = "x.example.org";
+ data[DatabaseAccessor::TYPE_COLUMN] = "A";
+ data[DatabaseAccessor::TTL_COLUMN] = "300";
+ data[DatabaseAccessor::RDATA_COLUMN] = "192.0.2.1";
+ return (true);
+ case 2:
+ data[DatabaseAccessor::NAME_COLUMN] = "x.example.org";
+ data[DatabaseAccessor::TYPE_COLUMN] = "A";
+ data[DatabaseAccessor::TTL_COLUMN] = "300";
+ data[DatabaseAccessor::RDATA_COLUMN] = "192.0.2.2";
+ return (true);
+ case 3:
+ data[DatabaseAccessor::NAME_COLUMN] = "x.example.org";
+ data[DatabaseAccessor::TYPE_COLUMN] = "AAAA";
+ data[DatabaseAccessor::TTL_COLUMN] = "300";
+ data[DatabaseAccessor::RDATA_COLUMN] = "2001:db8::1";
+ return (true);
+ case 4:
+ data[DatabaseAccessor::NAME_COLUMN] = "x.example.org";
+ data[DatabaseAccessor::TYPE_COLUMN] = "AAAA";
+ data[DatabaseAccessor::TTL_COLUMN] = "300";
+ data[DatabaseAccessor::RDATA_COLUMN] = "2001:db8::2";
+ return (true);
+ default:
+ ADD_FAILURE() <<
+ "Request past the end of iterator context";
+ case 5:
+ return (false);
+ }
+ }
+ };
+ class EmptyIteratorContext : public IteratorContext {
+ public:
+ virtual bool getNext(string(&)[COLUMN_COUNT]) {
+ return (false);
+ }
+ };
+ class BadIteratorContext : public IteratorContext {
+ private:
+ int step;
+ public:
+ BadIteratorContext() :
+ step(0)
+ { }
+ virtual bool getNext(string (&data)[COLUMN_COUNT]) {
+ switch (step ++) {
+ case 0:
+ data[DatabaseAccessor::NAME_COLUMN] = "x.example.org";
+ data[DatabaseAccessor::TYPE_COLUMN] = "A";
+ data[DatabaseAccessor::TTL_COLUMN] = "300";
+ data[DatabaseAccessor::RDATA_COLUMN] = "192.0.2.1";
+ return (true);
+ case 1:
+ data[DatabaseAccessor::NAME_COLUMN] = "x.example.org";
+ data[DatabaseAccessor::TYPE_COLUMN] = "A";
+ data[DatabaseAccessor::TTL_COLUMN] = "301";
+ data[DatabaseAccessor::RDATA_COLUMN] = "192.0.2.2";
+ return (true);
+ default:
+ ADD_FAILURE() <<
+ "Request past the end of iterator context";
+ case 2:
+ return (false);
+ }
+ }
+ };
+public:
+ virtual IteratorContextPtr getAllRecords(int id) const {
+ if (id == 42) {
+ return (IteratorContextPtr(new MockIteratorContext()));
+ } else if (id == 13) {
+ return (IteratorContextPtr());
+ } else if (id == 0) {
+ return (IteratorContextPtr(new EmptyIteratorContext()));
+ } else if (id == -1) {
+ return (IteratorContextPtr(new BadIteratorContext()));
+ } else {
+ isc_throw(isc::Unexpected, "Unknown zone ID");
+ }
+ }
+
+ virtual IteratorContextPtr getRecords(const std::string& name, int id) const {
+ if (id == 42) {
+ return (IteratorContextPtr(new MockNameIteratorContext(*this, id, name)));
+ } else {
+ isc_throw(isc::Unexpected, "Unknown zone ID");
+ }
+ }
+
+private:
+ std::map<std::string, std::vector< std::vector<std::string> > > records;
+ // used as temporary storage during the building of the fake data
+ std::vector< std::vector<std::string> > cur_name;
+
+ // Adds one record to the current name in the database
+ // The actual data will not be added to 'records' until
+ // addCurName() is called
+ void addRecord(const std::string& name,
+ const std::string& type,
+ const std::string& sigtype,
+ const std::string& rdata) {
+ std::vector<std::string> columns;
+ columns.push_back(name);
+ columns.push_back(type);
+ columns.push_back(sigtype);
+ columns.push_back(rdata);
+ cur_name.push_back(columns);
+ }
+
+ // Adds all records we just built with calls to addRecord()
+ // to the actual fake database. This will clear cur_name,
+ // so we can immediately start adding new records.
+ void addCurName(const std::string& name) {
+ ASSERT_EQ(0, records.count(name));
+ // Append the name to all of them
+ for (std::vector<std::vector<std::string> >::iterator
+ i(cur_name.begin()); i != cur_name.end(); ++ i) {
+ i->push_back(name);
+ }
+ records[name] = cur_name;
+ cur_name.clear();
+ }
+
+ // Fills the database with zone data.
+ // This method constructs a number of resource records (with addRecord),
+ // which will all be added for one domain name to the fake database
+ // (with addCurName). So for instance the first set of calls create
+ // data for the name 'www.example.org', which will consist of one A RRset
+ // of one record, and one AAAA RRset of two records.
+ // The order in which they are added is the order in which getNext()
+ // will return them (so we can test whether find() etc. support data that
+ // might not come in 'normal' order).
+ // addCurName() fails immediately if you try to add the same name twice.
+ void fillData() {
+ // some plain data
+ addRecord("A", "3600", "", "192.0.2.1");
+ addRecord("AAAA", "3600", "", "2001:db8::1");
+ addRecord("AAAA", "3600", "", "2001:db8::2");
+ addCurName("www.example.org.");
+
+ addRecord("A", "3600", "", "192.0.2.1");
+ addRecord("AAAA", "3600", "", "2001:db8::1");
+ addRecord("A", "3600", "", "192.0.2.2");
+ addCurName("www2.example.org.");
+
+ addRecord("CNAME", "3600", "", "www.example.org.");
+ addCurName("cname.example.org.");
+
+ // some DNSSEC-'signed' data
+ addRecord("A", "3600", "", "192.0.2.1");
+ addRecord("RRSIG", "3600", "", "A 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE");
+ addRecord("RRSIG", "3600", "", "A 5 3 3600 20000101000000 20000201000000 12346 example.org. FAKEFAKEFAKE");
+ addRecord("AAAA", "3600", "", "2001:db8::1");
+ addRecord("AAAA", "3600", "", "2001:db8::2");
+ addRecord("RRSIG", "3600", "", "AAAA 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE");
+ addCurName("signed1.example.org.");
+ addRecord("CNAME", "3600", "", "www.example.org.");
+ addRecord("RRSIG", "3600", "", "CNAME 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE");
+ addCurName("signedcname1.example.org.");
+ // a special case that might fail; the sig is for a CNAME which isn't there
+ // (it should be ignored; ignoring a 'normal' other type is done above by www.)
+ addRecord("A", "3600", "", "192.0.2.1");
+ addRecord("RRSIG", "3600", "", "A 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE");
+ addRecord("RRSIG", "3600", "", "CNAME 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE");
+ addCurName("acnamesig1.example.org.");
+
+ // let's pretend we have a database that is not careful
+ // about the order in which it returns data
+ addRecord("RRSIG", "3600", "", "A 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE");
+ addRecord("AAAA", "3600", "", "2001:db8::2");
+ addRecord("RRSIG", "3600", "", "A 5 3 3600 20000101000000 20000201000000 12346 example.org. FAKEFAKEFAKE");
+ addRecord("A", "3600", "", "192.0.2.1");
+ addRecord("RRSIG", "3600", "", "AAAA 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE");
+ addRecord("AAAA", "3600", "", "2001:db8::1");
+ addCurName("signed2.example.org.");
+ addRecord("RRSIG", "3600", "", "CNAME 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE");
+ addRecord("CNAME", "3600", "", "www.example.org.");
+ addCurName("signedcname2.example.org.");
+
+ addRecord("RRSIG", "3600", "", "CNAME 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE");
+ addRecord("A", "3600", "", "192.0.2.1");
+ addRecord("RRSIG", "3600", "", "A 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE");
+ addCurName("acnamesig2.example.org.");
+
+ addRecord("RRSIG", "3600", "", "CNAME 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE");
+ addRecord("RRSIG", "3600", "", "A 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE");
+ addRecord("A", "3600", "", "192.0.2.1");
+ addCurName("acnamesig3.example.org.");
+
+ addRecord("A", "3600", "", "192.0.2.1");
+ addRecord("A", "360", "", "192.0.2.2");
+ addCurName("ttldiff1.example.org.");
+ addRecord("A", "360", "", "192.0.2.1");
+ addRecord("A", "3600", "", "192.0.2.2");
+ addCurName("ttldiff2.example.org.");
+
+ // also add some intentionally bad data
+ addRecord("A", "3600", "", "192.0.2.1");
+ addRecord("CNAME", "3600", "", "www.example.org.");
+ addCurName("badcname1.example.org.");
+
+ addRecord("CNAME", "3600", "", "www.example.org.");
+ addRecord("A", "3600", "", "192.0.2.1");
+ addCurName("badcname2.example.org.");
+
+ addRecord("CNAME", "3600", "", "www.example.org.");
+ addRecord("CNAME", "3600", "", "www.example2.org.");
+ addCurName("badcname3.example.org.");
+
+ addRecord("A", "3600", "", "bad");
+ addCurName("badrdata.example.org.");
+
+ addRecord("BAD_TYPE", "3600", "", "192.0.2.1");
+ addCurName("badtype.example.org.");
+
+ addRecord("A", "badttl", "", "192.0.2.1");
+ addCurName("badttl.example.org.");
+
+ addRecord("A", "badttl", "", "192.0.2.1");
+ addRecord("RRSIG", "3600", "", "A 5 3 3600 somebaddata 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE");
+ addCurName("badsig.example.org.");
+
+ addRecord("A", "3600", "", "192.0.2.1");
+ addRecord("RRSIG", "3600", "TXT", "A 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE");
+ addCurName("badsigtype.example.org.");
+
+ // Data for testing delegation (with NS and DNAME)
+ addRecord("NS", "3600", "", "ns.example.com.");
+ addRecord("NS", "3600", "", "ns.delegation.example.org.");
+ addRecord("RRSIG", "3600", "", "NS 5 3 3600 20000101000000 "
+ "20000201000000 12345 example.org. FAKEFAKEFAKE");
+ addCurName("delegation.example.org.");
+ addRecord("A", "3600", "", "192.0.2.1");
+ addCurName("ns.delegation.example.org.");
+ addRecord("A", "3600", "", "192.0.2.1");
+ addCurName("deep.below.delegation.example.org.");
+
+ addRecord("A", "3600", "", "192.0.2.1");
+ addRecord("DNAME", "3600", "", "dname.example.com.");
+ addRecord("RRSIG", "3600", "", "DNAME 5 3 3600 20000101000000 "
+ "20000201000000 12345 example.org. FAKEFAKEFAKE");
+ addCurName("dname.example.org.");
+ addRecord("A", "3600", "", "192.0.2.1");
+ addCurName("below.dname.example.org.");
+
+ // Broken NS
+ addRecord("A", "3600", "", "192.0.2.1");
+ addRecord("NS", "3600", "", "ns.example.com.");
+ addCurName("brokenns1.example.org.");
+ addRecord("NS", "3600", "", "ns.example.com.");
+ addRecord("A", "3600", "", "192.0.2.1");
+ addCurName("brokenns2.example.org.");
+
+ // Now double DNAME, to test failure mode
+ addRecord("DNAME", "3600", "", "dname1.example.com.");
+ addRecord("DNAME", "3600", "", "dname2.example.com.");
+ addCurName("baddname.example.org.");
+
+ // Put some data into apex (including NS) so we can check our NS
+ // doesn't break anything
+ addRecord("NS", "3600", "", "ns.example.com.");
+ addRecord("A", "3600", "", "192.0.2.1");
+ addRecord("RRSIG", "3600", "", "NS 5 3 3600 20000101000000 "
+ "20000201000000 12345 example.org. FAKEFAKEFAKE");
+ addCurName("example.org.");
+ }
+};
+
+// This tests the default getRecords behaviour, throwing NotImplemented
+TEST(DatabaseConnectionTest, getRecords) {
+ EXPECT_THROW(NopAccessor().getRecords(".", 1),
+ isc::NotImplemented);
+}
+
+// This tests the default getAllRecords behaviour, throwing NotImplemented
+TEST(DatabaseConnectionTest, getAllRecords) {
+ // The parameters don't matter
+ EXPECT_THROW(NopAccessor().getAllRecords(1),
+ isc::NotImplemented);
+}
+
+class DatabaseClientTest : public ::testing::Test {
+public:
+ DatabaseClientTest() {
+ createClient();
+ }
+ /*
+ * We initialize the client from a function, so we can call it multiple
+ * times per test.
+ */
+ void createClient() {
+ current_database_ = new MockAccessor();
+ client_.reset(new DatabaseClient(shared_ptr<DatabaseAccessor>(
+ current_database_)));
+ }
+ // Will be deleted by client_, just keep the current value for comparison.
+ MockAccessor* current_database_;
+ shared_ptr<DatabaseClient> client_;
+ const std::string database_name_;
+
+ /**
+ * Check the zone finder is a valid one and references the zone ID and
+ * database available here.
+ */
+ void checkZoneFinder(const DataSourceClient::FindResult& zone) {
+ ASSERT_NE(ZoneFinderPtr(), zone.zone_finder) << "No zone finder";
+ shared_ptr<DatabaseClient::Finder> finder(
+ dynamic_pointer_cast<DatabaseClient::Finder>(zone.zone_finder));
+ ASSERT_NE(shared_ptr<DatabaseClient::Finder>(), finder) <<
+ "Wrong type of finder";
+ EXPECT_EQ(42, finder->zone_id());
+ EXPECT_EQ(current_database_, &finder->database());
+ }
+
+ shared_ptr<DatabaseClient::Finder> getFinder() {
+ DataSourceClient::FindResult zone(
+ client_->findZone(Name("example.org")));
+ EXPECT_EQ(result::SUCCESS, zone.code);
+ shared_ptr<DatabaseClient::Finder> finder(
+ dynamic_pointer_cast<DatabaseClient::Finder>(zone.zone_finder));
+ EXPECT_EQ(42, finder->zone_id());
+
+ return (finder);
+ }
+
+ std::vector<std::string> expected_rdatas_;
+ std::vector<std::string> expected_sig_rdatas_;
+};
+
+TEST_F(DatabaseClientTest, zoneNotFound) {
+ DataSourceClient::FindResult zone(client_->findZone(Name("example.com")));
+ EXPECT_EQ(result::NOTFOUND, zone.code);
+}
+
+TEST_F(DatabaseClientTest, exactZone) {
+ DataSourceClient::FindResult zone(client_->findZone(Name("example.org")));
+ EXPECT_EQ(result::SUCCESS, zone.code);
+ checkZoneFinder(zone);
+}
+
+TEST_F(DatabaseClientTest, superZone) {
+ DataSourceClient::FindResult zone(client_->findZone(Name(
+ "sub.example.org")));
+ EXPECT_EQ(result::PARTIALMATCH, zone.code);
+ checkZoneFinder(zone);
+}
+
+TEST_F(DatabaseClientTest, noAccessorException) {
+ // We need a dummy variable here; some compilers would regard it as a mere
+ // declaration instead of an instantiation and make the test fail.
+ EXPECT_THROW(DatabaseClient dummy((shared_ptr<DatabaseAccessor>())),
+ isc::InvalidParameter);
+}
+
+// If the zone doesn't exist, exception is thrown
+TEST_F(DatabaseClientTest, noZoneIterator) {
+ EXPECT_THROW(client_->getIterator(Name("example.com")), DataSourceError);
+}
+
+// If the zone doesn't exist and iteration is not implemented, it still throws
+// the "zone doesn't exist" exception
+TEST_F(DatabaseClientTest, noZoneNotImplementedIterator) {
+ EXPECT_THROW(DatabaseClient(boost::shared_ptr<DatabaseAccessor>(
+ new NopAccessor())).getIterator(Name("example.com")),
+ DataSourceError);
+}
+
+TEST_F(DatabaseClientTest, notImplementedIterator) {
+ EXPECT_THROW(DatabaseClient(shared_ptr<DatabaseAccessor>(
+ new NopAccessor())).getIterator(Name("example.org")),
+ isc::NotImplemented);
+}
+
+// Simulate a bug in the connection by passing NULL as the context.
+// It should not crash, but gracefully throw.
+TEST_F(DatabaseClientTest, nullIteratorContext) {
+ EXPECT_THROW(client_->getIterator(Name("null.example.org")),
+ isc::Unexpected);
+}
+
+// It doesn't crash or anything if the zone is completely empty
+TEST_F(DatabaseClientTest, emptyIterator) {
+ ZoneIteratorPtr it(client_->getIterator(Name("empty.example.org")));
+ EXPECT_EQ(ConstRRsetPtr(), it->getNextRRset());
+ // This is past the end, it should throw
+ EXPECT_THROW(it->getNextRRset(), isc::Unexpected);
+}
+
+// Iterate through a zone
+TEST_F(DatabaseClientTest, iterator) {
+ ZoneIteratorPtr it(client_->getIterator(Name("example.org")));
+ ConstRRsetPtr rrset(it->getNextRRset());
+ ASSERT_NE(ConstRRsetPtr(), rrset);
+ EXPECT_EQ(Name("example.org"), rrset->getName());
+ EXPECT_EQ(RRClass::IN(), rrset->getClass());
+ EXPECT_EQ(RRType::SOA(), rrset->getType());
+ EXPECT_EQ(RRTTL(300), rrset->getTTL());
+ RdataIteratorPtr rit(rrset->getRdataIterator());
+ ASSERT_FALSE(rit->isLast());
+ rit->next();
+ EXPECT_TRUE(rit->isLast());
+
+ rrset = it->getNextRRset();
+ ASSERT_NE(ConstRRsetPtr(), rrset);
+ EXPECT_EQ(Name("x.example.org"), rrset->getName());
+ EXPECT_EQ(RRClass::IN(), rrset->getClass());
+ EXPECT_EQ(RRType::A(), rrset->getType());
+ EXPECT_EQ(RRTTL(300), rrset->getTTL());
+ rit = rrset->getRdataIterator();
+ ASSERT_FALSE(rit->isLast());
+ EXPECT_EQ("192.0.2.1", rit->getCurrent().toText());
+ rit->next();
+ ASSERT_FALSE(rit->isLast());
+ EXPECT_EQ("192.0.2.2", rit->getCurrent().toText());
+ rit->next();
+ EXPECT_TRUE(rit->isLast());
+
+ rrset = it->getNextRRset();
+ ASSERT_NE(ConstRRsetPtr(), rrset);
+ EXPECT_EQ(Name("x.example.org"), rrset->getName());
+ EXPECT_EQ(RRClass::IN(), rrset->getClass());
+ EXPECT_EQ(RRType::AAAA(), rrset->getType());
+ EXPECT_EQ(RRTTL(300), rrset->getTTL());
+ EXPECT_EQ(ConstRRsetPtr(), it->getNextRRset());
+ rit = rrset->getRdataIterator();
+ ASSERT_FALSE(rit->isLast());
+ EXPECT_EQ("2001:db8::1", rit->getCurrent().toText());
+ rit->next();
+ ASSERT_FALSE(rit->isLast());
+ EXPECT_EQ("2001:db8::2", rit->getCurrent().toText());
+ rit->next();
+ EXPECT_TRUE(rit->isLast());
+}
+
+// This has an inconsistent TTL in the set (the rest, like nonsense in
+// the data, is handled in rdata itself).
+TEST_F(DatabaseClientTest, badIterator) {
+ // It should not throw, but get the lowest one of them
+ ZoneIteratorPtr it(client_->getIterator(Name("bad.example.org")));
+ EXPECT_EQ(it->getNextRRset()->getTTL(), isc::dns::RRTTL(300));
+}
+
+// checks if the given rrset matches the
+// given name, class, type and rdatas
+void
+checkRRset(isc::dns::ConstRRsetPtr rrset,
+ const isc::dns::Name& name,
+ const isc::dns::RRClass& rrclass,
+ const isc::dns::RRType& rrtype,
+ const isc::dns::RRTTL& rrttl,
+ const std::vector<std::string>& rdatas) {
+ isc::dns::RRsetPtr expected_rrset(
+ new isc::dns::RRset(name, rrclass, rrtype, rrttl));
+ for (unsigned int i = 0; i < rdatas.size(); ++i) {
+ expected_rrset->addRdata(
+ isc::dns::rdata::createRdata(rrtype, rrclass,
+ rdatas[i]));
+ }
+ isc::testutils::rrsetCheck(expected_rrset, rrset);
+}
+
+void
+doFindTest(shared_ptr<DatabaseClient::Finder> finder,
+ const isc::dns::Name& name,
+ const isc::dns::RRType& type,
+ const isc::dns::RRType& expected_type,
+ const isc::dns::RRTTL expected_ttl,
+ ZoneFinder::Result expected_result,
+ const std::vector<std::string>& expected_rdatas,
+ const std::vector<std::string>& expected_sig_rdatas,
+ const isc::dns::Name& expected_name = isc::dns::Name::ROOT_NAME(),
+ const ZoneFinder::FindOptions options = ZoneFinder::FIND_DEFAULT)
+{
+ SCOPED_TRACE("doFindTest " + name.toText() + " " + type.toText());
+ ZoneFinder::FindResult result =
+ finder->find(name, type, NULL, options);
+ ASSERT_EQ(expected_result, result.code) << name << " " << type;
+ if (expected_rdatas.size() > 0) {
+ checkRRset(result.rrset, expected_name != Name(".") ? expected_name :
+ name, finder->getClass(), expected_type, expected_ttl,
+ expected_rdatas);
+
+ if (expected_sig_rdatas.size() > 0) {
+ checkRRset(result.rrset->getRRsig(), expected_name != Name(".") ?
+ expected_name : name, finder->getClass(),
+ isc::dns::RRType::RRSIG(), expected_ttl,
+ expected_sig_rdatas);
+ } else {
+ EXPECT_EQ(isc::dns::RRsetPtr(), result.rrset->getRRsig());
+ }
+ } else {
+ EXPECT_EQ(isc::dns::RRsetPtr(), result.rrset);
+ }
+}
+
+TEST_F(DatabaseClientTest, find) {
+ shared_ptr<DatabaseClient::Finder> finder(getFinder());
+
+ expected_rdatas_.clear();
+ expected_sig_rdatas_.clear();
+ expected_rdatas_.push_back("192.0.2.1");
+ doFindTest(finder, isc::dns::Name("www.example.org."),
+ isc::dns::RRType::A(), isc::dns::RRType::A(),
+ isc::dns::RRTTL(3600),
+ ZoneFinder::SUCCESS,
+ expected_rdatas_, expected_sig_rdatas_);
+
+ expected_rdatas_.clear();
+ expected_sig_rdatas_.clear();
+ expected_rdatas_.push_back("192.0.2.1");
+ expected_rdatas_.push_back("192.0.2.2");
+ doFindTest(finder, isc::dns::Name("www2.example.org."),
+ isc::dns::RRType::A(), isc::dns::RRType::A(),
+ isc::dns::RRTTL(3600),
+ ZoneFinder::SUCCESS,
+ expected_rdatas_, expected_sig_rdatas_);
+
+ expected_rdatas_.clear();
+ expected_sig_rdatas_.clear();
+ expected_rdatas_.push_back("2001:db8::1");
+ expected_rdatas_.push_back("2001:db8::2");
+ doFindTest(finder, isc::dns::Name("www.example.org."),
+ isc::dns::RRType::AAAA(), isc::dns::RRType::AAAA(),
+ isc::dns::RRTTL(3600),
+ ZoneFinder::SUCCESS,
+ expected_rdatas_, expected_sig_rdatas_);
+
+ expected_rdatas_.clear();
+ expected_sig_rdatas_.clear();
+ doFindTest(finder, isc::dns::Name("www.example.org."),
+ isc::dns::RRType::TXT(), isc::dns::RRType::TXT(),
+ isc::dns::RRTTL(3600),
+ ZoneFinder::NXRRSET,
+ expected_rdatas_, expected_sig_rdatas_);
+
+ expected_rdatas_.clear();
+ expected_sig_rdatas_.clear();
+ expected_rdatas_.push_back("www.example.org.");
+ doFindTest(finder, isc::dns::Name("cname.example.org."),
+ isc::dns::RRType::A(), isc::dns::RRType::CNAME(),
+ isc::dns::RRTTL(3600),
+ ZoneFinder::CNAME,
+ expected_rdatas_, expected_sig_rdatas_);
+
+ expected_rdatas_.clear();
+ expected_sig_rdatas_.clear();
+ expected_rdatas_.push_back("www.example.org.");
+ doFindTest(finder, isc::dns::Name("cname.example.org."),
+ isc::dns::RRType::CNAME(), isc::dns::RRType::CNAME(),
+ isc::dns::RRTTL(3600),
+ ZoneFinder::SUCCESS,
+ expected_rdatas_, expected_sig_rdatas_);
+
+ expected_rdatas_.clear();
+ expected_sig_rdatas_.clear();
+ doFindTest(finder, isc::dns::Name("doesnotexist.example.org."),
+ isc::dns::RRType::A(), isc::dns::RRType::A(),
+ isc::dns::RRTTL(3600),
+ ZoneFinder::NXDOMAIN,
+ expected_rdatas_, expected_sig_rdatas_);
+
+ expected_rdatas_.clear();
+ expected_sig_rdatas_.clear();
+ expected_rdatas_.push_back("192.0.2.1");
+ expected_sig_rdatas_.push_back("A 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE");
+ expected_sig_rdatas_.push_back("A 5 3 3600 20000101000000 20000201000000 12346 example.org. FAKEFAKEFAKE");
+ doFindTest(finder, isc::dns::Name("signed1.example.org."),
+ isc::dns::RRType::A(), isc::dns::RRType::A(),
+ isc::dns::RRTTL(3600),
+ ZoneFinder::SUCCESS,
+ expected_rdatas_, expected_sig_rdatas_);
+
+ expected_rdatas_.clear();
+ expected_sig_rdatas_.clear();
+ expected_rdatas_.push_back("2001:db8::1");
+ expected_rdatas_.push_back("2001:db8::2");
+ expected_sig_rdatas_.push_back("AAAA 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE");
+ doFindTest(finder, isc::dns::Name("signed1.example.org."),
+ isc::dns::RRType::AAAA(), isc::dns::RRType::AAAA(),
+ isc::dns::RRTTL(3600),
+ ZoneFinder::SUCCESS,
+ expected_rdatas_, expected_sig_rdatas_);
+
+ expected_rdatas_.clear();
+ expected_sig_rdatas_.clear();
+ doFindTest(finder, isc::dns::Name("signed1.example.org."),
+ isc::dns::RRType::TXT(), isc::dns::RRType::TXT(),
+ isc::dns::RRTTL(3600),
+ ZoneFinder::NXRRSET,
+ expected_rdatas_, expected_sig_rdatas_);
+
+ expected_rdatas_.clear();
+ expected_sig_rdatas_.clear();
+ expected_rdatas_.push_back("www.example.org.");
+ expected_sig_rdatas_.push_back("CNAME 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE");
+ doFindTest(finder, isc::dns::Name("signedcname1.example.org."),
+ isc::dns::RRType::A(), isc::dns::RRType::CNAME(),
+ isc::dns::RRTTL(3600),
+ ZoneFinder::CNAME,
+ expected_rdatas_, expected_sig_rdatas_);
+
+ expected_rdatas_.clear();
+ expected_sig_rdatas_.clear();
+ expected_rdatas_.push_back("192.0.2.1");
+ expected_sig_rdatas_.push_back("A 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE");
+ expected_sig_rdatas_.push_back("A 5 3 3600 20000101000000 20000201000000 12346 example.org. FAKEFAKEFAKE");
+ doFindTest(finder, isc::dns::Name("signed2.example.org."),
+ isc::dns::RRType::A(), isc::dns::RRType::A(),
+ isc::dns::RRTTL(3600),
+ ZoneFinder::SUCCESS,
+ expected_rdatas_, expected_sig_rdatas_);
+
+ expected_rdatas_.clear();
+ expected_sig_rdatas_.clear();
+ expected_rdatas_.push_back("2001:db8::2");
+ expected_rdatas_.push_back("2001:db8::1");
+ expected_sig_rdatas_.push_back("AAAA 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE");
+ doFindTest(finder, isc::dns::Name("signed2.example.org."),
+ isc::dns::RRType::AAAA(), isc::dns::RRType::AAAA(),
+ isc::dns::RRTTL(3600),
+ ZoneFinder::SUCCESS,
+ expected_rdatas_, expected_sig_rdatas_);
+
+ expected_rdatas_.clear();
+ expected_sig_rdatas_.clear();
+ doFindTest(finder, isc::dns::Name("signed2.example.org."),
+ isc::dns::RRType::TXT(), isc::dns::RRType::TXT(),
+ isc::dns::RRTTL(3600),
+ ZoneFinder::NXRRSET,
+ expected_rdatas_, expected_sig_rdatas_);
+
+ expected_rdatas_.clear();
+ expected_sig_rdatas_.clear();
+ expected_rdatas_.push_back("www.example.org.");
+ expected_sig_rdatas_.push_back("CNAME 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE");
+ doFindTest(finder, isc::dns::Name("signedcname2.example.org."),
+ isc::dns::RRType::A(), isc::dns::RRType::CNAME(),
+ isc::dns::RRTTL(3600),
+ ZoneFinder::CNAME,
+ expected_rdatas_, expected_sig_rdatas_);
+
+
+ expected_rdatas_.clear();
+ expected_sig_rdatas_.clear();
+ expected_rdatas_.push_back("192.0.2.1");
+ expected_sig_rdatas_.push_back("A 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE");
+ doFindTest(finder, isc::dns::Name("acnamesig1.example.org."),
+ isc::dns::RRType::A(), isc::dns::RRType::A(),
+ isc::dns::RRTTL(3600),
+ ZoneFinder::SUCCESS,
+ expected_rdatas_, expected_sig_rdatas_);
+
+ expected_rdatas_.clear();
+ expected_sig_rdatas_.clear();
+ expected_rdatas_.push_back("192.0.2.1");
+ expected_sig_rdatas_.push_back("A 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE");
+ doFindTest(finder, isc::dns::Name("acnamesig2.example.org."),
+ isc::dns::RRType::A(), isc::dns::RRType::A(),
+ isc::dns::RRTTL(3600),
+ ZoneFinder::SUCCESS,
+ expected_rdatas_, expected_sig_rdatas_);
+
+ expected_rdatas_.clear();
+ expected_sig_rdatas_.clear();
+ expected_rdatas_.push_back("192.0.2.1");
+ expected_sig_rdatas_.push_back("A 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE");
+ doFindTest(finder, isc::dns::Name("acnamesig3.example.org."),
+ isc::dns::RRType::A(), isc::dns::RRType::A(),
+ isc::dns::RRTTL(3600),
+ ZoneFinder::SUCCESS,
+ expected_rdatas_, expected_sig_rdatas_);
+
+ expected_rdatas_.clear();
+ expected_sig_rdatas_.clear();
+ expected_rdatas_.push_back("192.0.2.1");
+ expected_rdatas_.push_back("192.0.2.2");
+ doFindTest(finder, isc::dns::Name("ttldiff1.example.org."),
+ isc::dns::RRType::A(), isc::dns::RRType::A(),
+ isc::dns::RRTTL(360),
+ ZoneFinder::SUCCESS,
+ expected_rdatas_, expected_sig_rdatas_);
+
+ expected_rdatas_.clear();
+ expected_sig_rdatas_.clear();
+ expected_rdatas_.push_back("192.0.2.1");
+ expected_rdatas_.push_back("192.0.2.2");
+ doFindTest(finder, isc::dns::Name("ttldiff2.example.org."),
+ isc::dns::RRType::A(), isc::dns::RRType::A(),
+ isc::dns::RRTTL(360),
+ ZoneFinder::SUCCESS,
+ expected_rdatas_, expected_sig_rdatas_);
+
+
+ EXPECT_THROW(finder->find(isc::dns::Name("badcname1.example.org."),
+ isc::dns::RRType::A(),
+ NULL, ZoneFinder::FIND_DEFAULT),
+ DataSourceError);
+ EXPECT_THROW(finder->find(isc::dns::Name("badcname2.example.org."),
+ isc::dns::RRType::A(),
+ NULL, ZoneFinder::FIND_DEFAULT),
+ DataSourceError);
+ EXPECT_THROW(finder->find(isc::dns::Name("badcname3.example.org."),
+ isc::dns::RRType::A(),
+ NULL, ZoneFinder::FIND_DEFAULT),
+ DataSourceError);
+ EXPECT_THROW(finder->find(isc::dns::Name("badrdata.example.org."),
+ isc::dns::RRType::A(),
+ NULL, ZoneFinder::FIND_DEFAULT),
+ DataSourceError);
+ EXPECT_THROW(finder->find(isc::dns::Name("badtype.example.org."),
+ isc::dns::RRType::A(),
+ NULL, ZoneFinder::FIND_DEFAULT),
+ DataSourceError);
+ EXPECT_THROW(finder->find(isc::dns::Name("badttl.example.org."),
+ isc::dns::RRType::A(),
+ NULL, ZoneFinder::FIND_DEFAULT),
+ DataSourceError);
+ EXPECT_THROW(finder->find(isc::dns::Name("badsig.example.org."),
+ isc::dns::RRType::A(),
+ NULL, ZoneFinder::FIND_DEFAULT),
+ DataSourceError);
+
+ // Trigger the hardcoded exceptions and see if find() has cleaned up
+ EXPECT_THROW(finder->find(isc::dns::Name("dsexception.in.search."),
+ isc::dns::RRType::A(),
+ NULL, ZoneFinder::FIND_DEFAULT),
+ DataSourceError);
+ EXPECT_THROW(finder->find(isc::dns::Name("iscexception.in.search."),
+ isc::dns::RRType::A(),
+ NULL, ZoneFinder::FIND_DEFAULT),
+ isc::Exception);
+ EXPECT_THROW(finder->find(isc::dns::Name("basicexception.in.search."),
+ isc::dns::RRType::A(),
+ NULL, ZoneFinder::FIND_DEFAULT),
+ std::exception);
+
+ EXPECT_THROW(finder->find(isc::dns::Name("dsexception.in.getnext."),
+ isc::dns::RRType::A(),
+ NULL, ZoneFinder::FIND_DEFAULT),
+ DataSourceError);
+ EXPECT_THROW(finder->find(isc::dns::Name("iscexception.in.getnext."),
+ isc::dns::RRType::A(),
+ NULL, ZoneFinder::FIND_DEFAULT),
+ isc::Exception);
+ EXPECT_THROW(finder->find(isc::dns::Name("basicexception.in.getnext."),
+ isc::dns::RRType::A(),
+ NULL, ZoneFinder::FIND_DEFAULT),
+ std::exception);
+
+ // This RRSIG has the wrong sigtype field, which should be
+ // an error if we decide to keep using that field
+ // Right now the field is ignored, so it does not cause an error
+ expected_rdatas_.clear();
+ expected_sig_rdatas_.clear();
+ expected_rdatas_.push_back("192.0.2.1");
+ expected_sig_rdatas_.push_back("A 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE");
+ doFindTest(finder, isc::dns::Name("badsigtype.example.org."),
+ isc::dns::RRType::A(), isc::dns::RRType::A(),
+ isc::dns::RRTTL(3600),
+ ZoneFinder::SUCCESS,
+ expected_rdatas_, expected_sig_rdatas_);
+}
+
+TEST_F(DatabaseClientTest, findDelegation) {
+ shared_ptr<DatabaseClient::Finder> finder(getFinder());
+
+ // The apex should not be considered a delegation point, so we can
+ // access its data
+ expected_rdatas_.clear();
+ expected_sig_rdatas_.clear();
+ expected_rdatas_.push_back("192.0.2.1");
+ doFindTest(finder, isc::dns::Name("example.org."),
+ isc::dns::RRType::A(), isc::dns::RRType::A(),
+ isc::dns::RRTTL(3600), ZoneFinder::SUCCESS, expected_rdatas_,
+ expected_sig_rdatas_);
+
+ expected_rdatas_.clear();
+ expected_rdatas_.push_back("ns.example.com.");
+ expected_sig_rdatas_.push_back("NS 5 3 3600 20000101000000 20000201000000 "
+ "12345 example.org. FAKEFAKEFAKE");
+ doFindTest(finder, isc::dns::Name("example.org."),
+ isc::dns::RRType::NS(), isc::dns::RRType::NS(),
+ isc::dns::RRTTL(3600), ZoneFinder::SUCCESS, expected_rdatas_,
+ expected_sig_rdatas_);
+
+ // Check that when we ask for something below the delegation point, we
+ // get the NS (both when the requested RRset exists there and when it
+ // doesn't)
+ expected_rdatas_.clear();
+ expected_sig_rdatas_.clear();
+ expected_rdatas_.push_back("ns.example.com.");
+ expected_rdatas_.push_back("ns.delegation.example.org.");
+ expected_sig_rdatas_.push_back("NS 5 3 3600 20000101000000 20000201000000 "
+ "12345 example.org. FAKEFAKEFAKE");
+ doFindTest(finder, isc::dns::Name("ns.delegation.example.org."),
+ isc::dns::RRType::A(), isc::dns::RRType::NS(),
+ isc::dns::RRTTL(3600), ZoneFinder::DELEGATION, expected_rdatas_,
+ expected_sig_rdatas_,
+ isc::dns::Name("delegation.example.org."));
+ doFindTest(finder, isc::dns::Name("ns.delegation.example.org."),
+ isc::dns::RRType::AAAA(), isc::dns::RRType::NS(),
+ isc::dns::RRTTL(3600), ZoneFinder::DELEGATION, expected_rdatas_,
+ expected_sig_rdatas_,
+ isc::dns::Name("delegation.example.org."));
+ doFindTest(finder, isc::dns::Name("deep.below.delegation.example.org."),
+ isc::dns::RRType::AAAA(), isc::dns::RRType::NS(),
+ isc::dns::RRTTL(3600), ZoneFinder::DELEGATION, expected_rdatas_,
+ expected_sig_rdatas_,
+ isc::dns::Name("delegation.example.org."));
+
+ // Even when we check directly at the delegation point, we should get
+ // the NS
+ doFindTest(finder, isc::dns::Name("delegation.example.org."),
+ isc::dns::RRType::AAAA(), isc::dns::RRType::NS(),
+ isc::dns::RRTTL(3600), ZoneFinder::DELEGATION, expected_rdatas_,
+ expected_sig_rdatas_);
+
+ // And when we ask directly for the NS, we should still get the delegation
+ doFindTest(finder, isc::dns::Name("delegation.example.org."),
+ isc::dns::RRType::NS(), isc::dns::RRType::NS(),
+ isc::dns::RRTTL(3600), ZoneFinder::DELEGATION, expected_rdatas_,
+ expected_sig_rdatas_);
+
+ // Now test DNAME. If the query name is below the DNAME, we should get
+ // the DNAME (a zone with data under a DNAME is invalid, but we test the
+ // behaviour anyway just to make sure)
+ expected_rdatas_.clear();
+ expected_rdatas_.push_back("dname.example.com.");
+ expected_sig_rdatas_.clear();
+ expected_sig_rdatas_.push_back("DNAME 5 3 3600 20000101000000 "
+ "20000201000000 12345 example.org. "
+ "FAKEFAKEFAKE");
+ doFindTest(finder, isc::dns::Name("below.dname.example.org."),
+ isc::dns::RRType::A(), isc::dns::RRType::DNAME(),
+ isc::dns::RRTTL(3600), ZoneFinder::DNAME, expected_rdatas_,
+ expected_sig_rdatas_, isc::dns::Name("dname.example.org."));
+ doFindTest(finder, isc::dns::Name("below.dname.example.org."),
+ isc::dns::RRType::AAAA(), isc::dns::RRType::DNAME(),
+ isc::dns::RRTTL(3600), ZoneFinder::DNAME, expected_rdatas_,
+ expected_sig_rdatas_, isc::dns::Name("dname.example.org."));
+ doFindTest(finder, isc::dns::Name("really.deep.below.dname.example.org."),
+ isc::dns::RRType::AAAA(), isc::dns::RRType::DNAME(),
+ isc::dns::RRTTL(3600), ZoneFinder::DNAME, expected_rdatas_,
+ expected_sig_rdatas_, isc::dns::Name("dname.example.org."));
+
+ // Asking directly for the DNAME should give SUCCESS
+ doFindTest(finder, isc::dns::Name("dname.example.org."),
+ isc::dns::RRType::DNAME(), isc::dns::RRType::DNAME(),
+ isc::dns::RRTTL(3600), ZoneFinder::SUCCESS, expected_rdatas_,
+ expected_sig_rdatas_);
+
+ // But we don't delegate at the DNAME node itself
+ expected_rdatas_.clear();
+ expected_rdatas_.push_back("192.0.2.1");
+ expected_sig_rdatas_.clear();
+ doFindTest(finder, isc::dns::Name("dname.example.org."),
+ isc::dns::RRType::A(), isc::dns::RRType::A(),
+ isc::dns::RRTTL(3600), ZoneFinder::SUCCESS, expected_rdatas_,
+ expected_sig_rdatas_);
+ expected_rdatas_.clear();
+ doFindTest(finder, isc::dns::Name("dname.example.org."),
+ isc::dns::RRType::AAAA(), isc::dns::RRType::AAAA(),
+ isc::dns::RRTTL(3600), ZoneFinder::NXRRSET, expected_rdatas_,
+ expected_sig_rdatas_);
+
+ // This is a broken DNAME; it contains two targets
+ EXPECT_THROW(finder->find(isc::dns::Name("below.baddname.example.org."),
+ isc::dns::RRType::A(), NULL,
+ ZoneFinder::FIND_DEFAULT),
+ DataSourceError);
+
+ // Broken NS - it lives together with something else
+ EXPECT_THROW(finder->find(isc::dns::Name("brokenns1.example.org."),
+ isc::dns::RRType::A(), NULL,
+ ZoneFinder::FIND_DEFAULT),
+ DataSourceError);
+ EXPECT_THROW(finder->find(isc::dns::Name("brokenns2.example.org."),
+ isc::dns::RRType::A(), NULL,
+ ZoneFinder::FIND_DEFAULT),
+ DataSourceError);
+}
+
+// Glue-OK mode. Go down through NS delegations (but not through DNAME)
+// and pretend the delegation is not there.
+TEST_F(DatabaseClientTest, glueOK) {
+ shared_ptr<DatabaseClient::Finder> finder(getFinder());
+
+ expected_rdatas_.clear();
+ expected_sig_rdatas_.clear();
+ doFindTest(finder, isc::dns::Name("ns.delegation.example.org."),
+ isc::dns::RRType::AAAA(), isc::dns::RRType::AAAA(),
+ isc::dns::RRTTL(3600), ZoneFinder::NXRRSET,
+ expected_rdatas_, expected_sig_rdatas_,
+ isc::dns::Name("ns.delegation.example.org."),
+ ZoneFinder::FIND_GLUE_OK);
+ doFindTest(finder, isc::dns::Name("nothere.delegation.example.org."),
+ isc::dns::RRType::AAAA(), isc::dns::RRType::AAAA(),
+ isc::dns::RRTTL(3600), ZoneFinder::NXDOMAIN,
+ expected_rdatas_, expected_sig_rdatas_,
+ isc::dns::Name("nothere.delegation.example.org."),
+ ZoneFinder::FIND_GLUE_OK);
+ expected_rdatas_.push_back("192.0.2.1");
+ doFindTest(finder, isc::dns::Name("ns.delegation.example.org."),
+ isc::dns::RRType::A(), isc::dns::RRType::A(),
+ isc::dns::RRTTL(3600), ZoneFinder::SUCCESS,
+ expected_rdatas_, expected_sig_rdatas_,
+ isc::dns::Name("ns.delegation.example.org."),
+ ZoneFinder::FIND_GLUE_OK);
+ expected_rdatas_.clear();
+ expected_rdatas_.push_back("ns.example.com.");
+ expected_rdatas_.push_back("ns.delegation.example.org.");
+ expected_sig_rdatas_.clear();
+ expected_sig_rdatas_.push_back("NS 5 3 3600 20000101000000 "
+ "20000201000000 12345 example.org. "
+ "FAKEFAKEFAKE");
+ // When we request the NS, it should be SUCCESS, not DELEGATION
+ // (this is where GLUE_OK differs from the default mode)
+ doFindTest(finder, isc::dns::Name("delegation.example.org."),
+ isc::dns::RRType::NS(), isc::dns::RRType::NS(),
+ isc::dns::RRTTL(3600), ZoneFinder::SUCCESS,
+ expected_rdatas_, expected_sig_rdatas_,
+ isc::dns::Name("delegation.example.org."),
+ ZoneFinder::FIND_GLUE_OK);
+ expected_rdatas_.clear();
+ expected_rdatas_.push_back("dname.example.com.");
+ expected_sig_rdatas_.clear();
+ expected_sig_rdatas_.push_back("DNAME 5 3 3600 20000101000000 "
+ "20000201000000 12345 example.org. "
+ "FAKEFAKEFAKE");
+ doFindTest(finder, isc::dns::Name("below.dname.example.org."),
+ isc::dns::RRType::A(), isc::dns::RRType::DNAME(),
+ isc::dns::RRTTL(3600), ZoneFinder::DNAME, expected_rdatas_,
+ expected_sig_rdatas_, isc::dns::Name("dname.example.org."),
+ ZoneFinder::FIND_GLUE_OK);
+ doFindTest(finder, isc::dns::Name("below.dname.example.org."),
+ isc::dns::RRType::AAAA(), isc::dns::RRType::DNAME(),
+ isc::dns::RRTTL(3600), ZoneFinder::DNAME, expected_rdatas_,
+ expected_sig_rdatas_, isc::dns::Name("dname.example.org."),
+ ZoneFinder::FIND_GLUE_OK);
+}
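
[Editor's note, not part of the patch] A minimal sketch of what FIND_GLUE_OK means for a caller, using only the ZoneFinder::find() signature and FindOptions values shown in the tests above and in the zone.h hunk later in this diff; how the finder is obtained is left to the caller:

    #include <datasrc/zone.h>
    #include <dns/name.h>
    #include <dns/rrtype.h>

    bool
    haveGlueA(isc::datasrc::ZoneFinder& finder,
              const isc::dns::Name& glue_name)
    {
        using namespace isc::datasrc;
        // With FIND_DEFAULT the same query stops at the zone cut and returns
        // DELEGATION together with the NS RRset; FIND_GLUE_OK lets us through
        // to the glue address record itself.
        const ZoneFinder::FindResult result =
            finder.find(glue_name, isc::dns::RRType::A(), NULL,
                        ZoneFinder::FIND_GLUE_OK);
        return (result.code == ZoneFinder::SUCCESS);
    }
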
+
+TEST_F(DatabaseClientTest, getOrigin) {
+ DataSourceClient::FindResult zone(client_->findZone(Name("example.org")));
+ ASSERT_EQ(result::SUCCESS, zone.code);
+ shared_ptr<DatabaseClient::Finder> finder(
+ dynamic_pointer_cast<DatabaseClient::Finder>(zone.zone_finder));
+ EXPECT_EQ(42, finder->zone_id());
+ EXPECT_EQ(isc::dns::Name("example.org"), finder->getOrigin());
+}
+
+}
diff --git a/src/lib/datasrc/tests/memory_datasrc_unittest.cc b/src/lib/datasrc/tests/memory_datasrc_unittest.cc
index 22723fc..f47032f 100644
--- a/src/lib/datasrc/tests/memory_datasrc_unittest.cc
+++ b/src/lib/datasrc/tests/memory_datasrc_unittest.cc
@@ -29,6 +29,8 @@
#include <dns/masterload.h>
#include <datasrc/memory_datasrc.h>
+#include <datasrc/data_source.h>
+#include <datasrc/iterator.h>
#include <gtest/gtest.h>
@@ -138,6 +140,43 @@ TEST_F(InMemoryClientTest, add_find_Zone) {
getOrigin());
}
+TEST_F(InMemoryClientTest, iterator) {
+ // Just some preparation of the data
+ boost::shared_ptr<InMemoryZoneFinder>
+ zone(new InMemoryZoneFinder(RRClass::IN(), Name("a")));
+ RRsetPtr aRRsetA(new RRset(Name("a"), RRClass::IN(), RRType::A(),
+ RRTTL(300)));
+ aRRsetA->addRdata(rdata::in::A("192.0.2.1"));
+ RRsetPtr aRRsetAAAA(new RRset(Name("a"), RRClass::IN(), RRType::AAAA(),
+ RRTTL(300)));
+ aRRsetAAAA->addRdata(rdata::in::AAAA("2001:db8::1"));
+ aRRsetAAAA->addRdata(rdata::in::AAAA("2001:db8::2"));
+ RRsetPtr subRRsetA(new RRset(Name("sub.x.a"), RRClass::IN(), RRType::A(),
+ RRTTL(300)));
+ subRRsetA->addRdata(rdata::in::A("192.0.2.2"));
+ EXPECT_EQ(result::SUCCESS, memory_client.addZone(zone));
+ // First, the zone is not there, so it should throw
+ EXPECT_THROW(memory_client.getIterator(Name("b")), DataSourceError);
+ // This zone is not there either, even though there's a zone containing
+ // this name
+ EXPECT_THROW(memory_client.getIterator(Name("x.a")), DataSourceError);
+ // Now, an empty zone
+ ZoneIteratorPtr iterator(memory_client.getIterator(Name("a")));
+ EXPECT_EQ(ConstRRsetPtr(), iterator->getNextRRset());
+ // It throws Unexpected when we are past the end
+ EXPECT_THROW(iterator->getNextRRset(), isc::Unexpected);
+ EXPECT_EQ(result::SUCCESS, zone->add(aRRsetA));
+ EXPECT_EQ(result::SUCCESS, zone->add(aRRsetAAAA));
+ EXPECT_EQ(result::SUCCESS, zone->add(subRRsetA));
+ // Check it with the full zone, one RRset at a time.
+ // It should be in ascending order in the case of the InMemory data
+ // source (this isn't guaranteed in general)
+ iterator = memory_client.getIterator(Name("a"));
+ EXPECT_EQ(aRRsetA, iterator->getNextRRset());
+ EXPECT_EQ(aRRsetAAAA, iterator->getNextRRset());
+ EXPECT_EQ(subRRsetA, iterator->getNextRRset());
+ EXPECT_EQ(ConstRRsetPtr(), iterator->getNextRRset());
+}
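
[Editor's note, not part of the patch] Outside the fixture, the same setup-and-iterate steps would look roughly like the sketch below; memory_datasrc.h and iterator.h are the headers this test file itself includes, and the zone name "example" is made up for the illustration.

    #include <datasrc/memory_datasrc.h>
    #include <datasrc/iterator.h>
    #include <dns/name.h>
    #include <dns/rrclass.h>
    #include <dns/rrset.h>
    #include <dns/rrttl.h>
    #include <dns/rrtype.h>
    #include <dns/rdataclass.h>
    #include <boost/shared_ptr.hpp>

    void
    buildAndIterate(isc::datasrc::InMemoryClient& memory_client) {
        using namespace isc::dns;
        using namespace isc::datasrc;

        // Create an in-memory zone, add one A RRset, register the zone
        boost::shared_ptr<InMemoryZoneFinder> zone(
            new InMemoryZoneFinder(RRClass::IN(), Name("example")));
        RRsetPtr a_rrset(new RRset(Name("example"), RRClass::IN(),
                                   RRType::A(), RRTTL(300)));
        a_rrset->addRdata(rdata::in::A("192.0.2.1"));
        zone->add(a_rrset);
        memory_client.addZone(zone);

        // Iterate; the InMemory data source yields RRsets in ascending order
        ZoneIteratorPtr it(memory_client.getIterator(Name("example")));
        while (ConstRRsetPtr rrset = it->getNextRRset()) {
            // process rrset ...
        }
    }
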
+
TEST_F(InMemoryClientTest, getZoneCount) {
EXPECT_EQ(0, memory_client.getZoneCount());
memory_client.addZone(
diff --git a/src/lib/datasrc/tests/sqlite3_accessor_unittest.cc b/src/lib/datasrc/tests/sqlite3_accessor_unittest.cc
new file mode 100644
index 0000000..5f7abaf
--- /dev/null
+++ b/src/lib/datasrc/tests/sqlite3_accessor_unittest.cc
@@ -0,0 +1,332 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+#include <datasrc/sqlite3_accessor.h>
+
+#include <datasrc/data_source.h>
+
+#include <dns/rrclass.h>
+
+#include <gtest/gtest.h>
+#include <boost/scoped_ptr.hpp>
+
+using namespace isc::datasrc;
+using isc::data::ConstElementPtr;
+using isc::data::Element;
+using isc::dns::RRClass;
+using isc::dns::Name;
+
+namespace {
+// Some test data
+std::string SQLITE_DBFILE_EXAMPLE = TEST_DATA_DIR "/test.sqlite3";
+std::string SQLITE_DBFILE_EXAMPLE2 = TEST_DATA_DIR "/example2.com.sqlite3";
+std::string SQLITE_DBNAME_EXAMPLE2 = "sqlite3_example2.com.sqlite3";
+std::string SQLITE_DBFILE_EXAMPLE_ROOT = TEST_DATA_DIR "/test-root.sqlite3";
+std::string SQLITE_DBNAME_EXAMPLE_ROOT = "sqlite3_test-root.sqlite3";
+std::string SQLITE_DBFILE_BROKENDB = TEST_DATA_DIR "/brokendb.sqlite3";
+std::string SQLITE_DBFILE_MEMORY = ":memory:";
+std::string SQLITE_DBFILE_EXAMPLE_ORG = TEST_DATA_DIR "/example.org.sqlite3";
+
+// The following file must be non-existent and must be non-"creatable";
+// the sqlite3 library will try to create a new DB file if it doesn't exist,
+// so to test a failure case the create operation should also fail.
+// The "nodir" component, a non-existent directory, is inserted for this
+// purpose.
+std::string SQLITE_DBFILE_NOTEXIST = TEST_DATA_DIR "/nodir/notexist";
+
+// Opening works (the content is tested in different tests)
+TEST(SQLite3Open, common) {
+ EXPECT_NO_THROW(SQLite3Database db(SQLITE_DBFILE_EXAMPLE,
+ RRClass::IN()));
+}
+
+// The file can't be opened
+TEST(SQLite3Open, notExist) {
+ EXPECT_THROW(SQLite3Database db(SQLITE_DBFILE_NOTEXIST,
+ RRClass::IN()), SQLite3Error);
+}
+
+// It rejects a broken DB
+TEST(SQLite3Open, brokenDB) {
+ EXPECT_THROW(SQLite3Database db(SQLITE_DBFILE_BROKENDB,
+ RRClass::IN()), SQLite3Error);
+}
+
+// Test we can create the schema on the fly
+TEST(SQLite3Open, memoryDB) {
+ EXPECT_NO_THROW(SQLite3Database db(SQLITE_DBFILE_MEMORY,
+ RRClass::IN()));
+}
+
+// Test fixture for querying the db
+class SQLite3Access : public ::testing::Test {
+public:
+ SQLite3Access() {
+ initAccessor(SQLITE_DBFILE_EXAMPLE, RRClass::IN());
+ }
+ // So it can be re-created with different data
+ void initAccessor(const std::string& filename, const RRClass& rrclass) {
+ db.reset(new SQLite3Database(filename, rrclass));
+ }
+ // The tested db
+ boost::shared_ptr<SQLite3Database> db;
+};
+
+// This zone exists in the data, so it should be found
+TEST_F(SQLite3Access, getZone) {
+ std::pair<bool, int> result(db->getZone(Name("example.com")));
+ EXPECT_TRUE(result.first);
+ EXPECT_EQ(1, result.second);
+}
+
+// But it should find only the zone, nothing below it
+TEST_F(SQLite3Access, subZone) {
+ EXPECT_FALSE(db->getZone(Name("sub.example.com")).first);
+}
+
+// This zone is not there at all
+TEST_F(SQLite3Access, noZone) {
+ EXPECT_FALSE(db->getZone(Name("example.org")).first);
+}
+
+// This zone is there, but in a different class
+TEST_F(SQLite3Access, noClass) {
+ initAccessor(SQLITE_DBFILE_EXAMPLE, RRClass::CH());
+ EXPECT_FALSE(db->getZone(Name("example.com")).first);
+}
+
+// This tests the iterator context
+TEST_F(SQLite3Access, iterator) {
+ // Our test zone is conveniently small, but not empty
+ initAccessor(SQLITE_DBFILE_EXAMPLE_ORG, RRClass::IN());
+
+ const std::pair<bool, int> zone_info(db->getZone(Name("example.org")));
+ ASSERT_TRUE(zone_info.first);
+
+ // Get the iterator context
+ DatabaseAccessor::IteratorContextPtr
+ context(db->getAllRecords(zone_info.second));
+ ASSERT_NE(DatabaseAccessor::IteratorContextPtr(),
+ context);
+
+ std::string data[DatabaseAccessor::COLUMN_COUNT];
+ // Get and check the records one by one, in the expected order
+ EXPECT_TRUE(context->getNext(data));
+ EXPECT_EQ("DNAME", data[DatabaseAccessor::TYPE_COLUMN]);
+ EXPECT_EQ("3600", data[DatabaseAccessor::TTL_COLUMN]);
+ EXPECT_EQ("dname.example.info.", data[DatabaseAccessor::RDATA_COLUMN]);
+ EXPECT_EQ("dname.example.org.", data[DatabaseAccessor::NAME_COLUMN]);
+
+ EXPECT_TRUE(context->getNext(data));
+ EXPECT_EQ("DNAME", data[DatabaseAccessor::TYPE_COLUMN]);
+ EXPECT_EQ("3600", data[DatabaseAccessor::TTL_COLUMN]);
+ EXPECT_EQ("dname2.example.info.", data[DatabaseAccessor::RDATA_COLUMN]);
+ EXPECT_EQ("dname2.foo.example.org.", data[DatabaseAccessor::NAME_COLUMN]);
+
+ EXPECT_TRUE(context->getNext(data));
+ EXPECT_EQ("MX", data[DatabaseAccessor::TYPE_COLUMN]);
+ EXPECT_EQ("3600", data[DatabaseAccessor::TTL_COLUMN]);
+ EXPECT_EQ("10 mail.example.org.", data[DatabaseAccessor::RDATA_COLUMN]);
+ EXPECT_EQ("example.org.", data[DatabaseAccessor::NAME_COLUMN]);
+
+ EXPECT_TRUE(context->getNext(data));
+ EXPECT_EQ("NS", data[DatabaseAccessor::TYPE_COLUMN]);
+ EXPECT_EQ("3600", data[DatabaseAccessor::TTL_COLUMN]);
+ EXPECT_EQ("ns1.example.org.", data[DatabaseAccessor::RDATA_COLUMN]);
+ EXPECT_EQ("example.org.", data[DatabaseAccessor::NAME_COLUMN]);
+
+ EXPECT_TRUE(context->getNext(data));
+ EXPECT_EQ("NS", data[DatabaseAccessor::TYPE_COLUMN]);
+ EXPECT_EQ("3600", data[DatabaseAccessor::TTL_COLUMN]);
+ EXPECT_EQ("ns2.example.org.", data[DatabaseAccessor::RDATA_COLUMN]);
+ EXPECT_EQ("example.org.", data[DatabaseAccessor::NAME_COLUMN]);
+
+ EXPECT_TRUE(context->getNext(data));
+ EXPECT_EQ("NS", data[DatabaseAccessor::TYPE_COLUMN]);
+ EXPECT_EQ("3600", data[DatabaseAccessor::TTL_COLUMN]);
+ EXPECT_EQ("ns3.example.org.", data[DatabaseAccessor::RDATA_COLUMN]);
+ EXPECT_EQ("example.org.", data[DatabaseAccessor::NAME_COLUMN]);
+
+ EXPECT_TRUE(context->getNext(data));
+ EXPECT_EQ("SOA", data[DatabaseAccessor::TYPE_COLUMN]);
+ EXPECT_EQ("3600", data[DatabaseAccessor::TTL_COLUMN]);
+ EXPECT_EQ("ns1.example.org. admin.example.org. "
+ "1234 3600 1800 2419200 7200",
+ data[DatabaseAccessor::RDATA_COLUMN]);
+ EXPECT_EQ("example.org.", data[DatabaseAccessor::NAME_COLUMN]);
+
+ EXPECT_TRUE(context->getNext(data));
+ EXPECT_EQ("A", data[DatabaseAccessor::TYPE_COLUMN]);
+ EXPECT_EQ("3600", data[DatabaseAccessor::TTL_COLUMN]);
+ EXPECT_EQ("192.0.2.10", data[DatabaseAccessor::RDATA_COLUMN]);
+ EXPECT_EQ("mail.example.org.", data[DatabaseAccessor::NAME_COLUMN]);
+
+ EXPECT_TRUE(context->getNext(data));
+ EXPECT_EQ("A", data[DatabaseAccessor::TYPE_COLUMN]);
+ EXPECT_EQ("3600", data[DatabaseAccessor::TTL_COLUMN]);
+ EXPECT_EQ("192.0.2.101", data[DatabaseAccessor::RDATA_COLUMN]);
+ EXPECT_EQ("ns.sub.example.org.", data[DatabaseAccessor::NAME_COLUMN]);
+
+ EXPECT_TRUE(context->getNext(data));
+ EXPECT_EQ("NS", data[DatabaseAccessor::TYPE_COLUMN]);
+ EXPECT_EQ("3600", data[DatabaseAccessor::TTL_COLUMN]);
+ EXPECT_EQ("ns.sub.example.org.", data[DatabaseAccessor::RDATA_COLUMN]);
+ EXPECT_EQ("sub.example.org.", data[DatabaseAccessor::NAME_COLUMN]);
+
+ EXPECT_TRUE(context->getNext(data));
+ EXPECT_EQ("A", data[DatabaseAccessor::TYPE_COLUMN]);
+ EXPECT_EQ("3600", data[DatabaseAccessor::TTL_COLUMN]);
+ EXPECT_EQ("192.0.2.1", data[DatabaseAccessor::RDATA_COLUMN]);
+ EXPECT_EQ("www.example.org.", data[DatabaseAccessor::NAME_COLUMN]);
+
+ // Check there are no more records
+ EXPECT_FALSE(context->getNext(data));
+
+ // And make sure calling it again won't cause problems.
+ EXPECT_FALSE(context->getNext(data));
+}
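
[Editor's note, not part of the patch] A sketch of the accessor-level loop this test drives, reusing only calls demonstrated above (getZone(), getAllRecords(), getNext() filling a COLUMN_COUNT-sized array); the db file path is whatever the caller supplies:

    #include <datasrc/sqlite3_accessor.h>
    #include <dns/name.h>
    #include <dns/rrclass.h>
    #include <iostream>
    #include <string>
    #include <utility>

    void
    dumpAllRecords(const std::string& dbfile,
                   const isc::dns::Name& zone_name)
    {
        using namespace isc::datasrc;

        SQLite3Database db(dbfile, isc::dns::RRClass::IN());
        const std::pair<bool, int> zone_info(db.getZone(zone_name));
        if (!zone_info.first) {
            return;                 // zone not present in this database
        }
        std::string columns[DatabaseAccessor::COLUMN_COUNT];
        DatabaseAccessor::IteratorContextPtr context(
            db.getAllRecords(zone_info.second));
        // getNext() fills the column array and returns false at the end
        while (context->getNext(columns)) {
            std::cout << columns[DatabaseAccessor::NAME_COLUMN] << " "
                      << columns[DatabaseAccessor::TTL_COLUMN] << " "
                      << columns[DatabaseAccessor::TYPE_COLUMN] << " "
                      << columns[DatabaseAccessor::RDATA_COLUMN] << std::endl;
        }
    }
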
+
+TEST(SQLite3Open, getDBNameExample2) {
+ SQLite3Database db(SQLITE_DBFILE_EXAMPLE2, RRClass::IN());
+ EXPECT_EQ(SQLITE_DBNAME_EXAMPLE2, db.getDBName());
+}
+
+TEST(SQLite3Open, getDBNameExampleROOT) {
+ SQLite3Database db(SQLITE_DBFILE_EXAMPLE_ROOT, RRClass::IN());
+ EXPECT_EQ(SQLITE_DBNAME_EXAMPLE_ROOT, db.getDBName());
+}
+
+// Simple helper to check the field values of a single record row
+void
+checkRecordRow(const std::string columns[],
+ const std::string& field0,
+ const std::string& field1,
+ const std::string& field2,
+ const std::string& field3,
+ const std::string& field4)
+{
+ EXPECT_EQ(field0, columns[DatabaseAccessor::TYPE_COLUMN]);
+ EXPECT_EQ(field1, columns[DatabaseAccessor::TTL_COLUMN]);
+ EXPECT_EQ(field2, columns[DatabaseAccessor::SIGTYPE_COLUMN]);
+ EXPECT_EQ(field3, columns[DatabaseAccessor::RDATA_COLUMN]);
+ EXPECT_EQ(field4, columns[DatabaseAccessor::NAME_COLUMN]);
+}
+
+TEST_F(SQLite3Access, getRecords) {
+ const std::pair<bool, int> zone_info(db->getZone(Name("example.com")));
+ ASSERT_TRUE(zone_info.first);
+
+ const int zone_id = zone_info.second;
+ ASSERT_EQ(1, zone_id);
+
+ std::string columns[DatabaseAccessor::COLUMN_COUNT];
+
+ DatabaseAccessor::IteratorContextPtr
+ context(db->getRecords("foo.bar", 1));
+ ASSERT_NE(DatabaseAccessor::IteratorContextPtr(),
+ context);
+ EXPECT_FALSE(context->getNext(columns));
+ checkRecordRow(columns, "", "", "", "", "");
+
+ // now try some real searches
+ context = db->getRecords("foo.example.com.", zone_id);
+ ASSERT_TRUE(context->getNext(columns));
+ checkRecordRow(columns, "CNAME", "3600", "",
+ "cnametest.example.org.", "");
+ ASSERT_TRUE(context->getNext(columns));
+ checkRecordRow(columns, "RRSIG", "3600", "CNAME",
+ "CNAME 5 3 3600 20100322084538 20100220084538 33495 "
+ "example.com. FAKEFAKEFAKEFAKE", "");
+ ASSERT_TRUE(context->getNext(columns));
+ checkRecordRow(columns, "NSEC", "7200", "",
+ "mail.example.com. CNAME RRSIG NSEC", "");
+ ASSERT_TRUE(context->getNext(columns));
+ checkRecordRow(columns, "RRSIG", "7200", "NSEC",
+ "NSEC 5 3 7200 20100322084538 20100220084538 33495 "
+ "example.com. FAKEFAKEFAKEFAKE", "");
+ EXPECT_FALSE(context->getNext(columns));
+ // with no more records, the array should not have been modified
+ checkRecordRow(columns, "RRSIG", "7200", "NSEC",
+ "NSEC 5 3 7200 20100322084538 20100220084538 33495 "
+ "example.com. FAKEFAKEFAKEFAKE", "");
+
+ context = db->getRecords("example.com.", zone_id);
+ ASSERT_TRUE(context->getNext(columns));
+ checkRecordRow(columns, "SOA", "3600", "",
+ "master.example.com. admin.example.com. "
+ "1234 3600 1800 2419200 7200", "");
+ ASSERT_TRUE(context->getNext(columns));
+ checkRecordRow(columns, "RRSIG", "3600", "SOA",
+ "SOA 5 2 3600 20100322084538 20100220084538 "
+ "33495 example.com. FAKEFAKEFAKEFAKE", "");
+ ASSERT_TRUE(context->getNext(columns));
+ checkRecordRow(columns, "NS", "1200", "", "dns01.example.com.", "");
+ ASSERT_TRUE(context->getNext(columns));
+ checkRecordRow(columns, "NS", "3600", "", "dns02.example.com.", "");
+ ASSERT_TRUE(context->getNext(columns));
+ checkRecordRow(columns, "NS", "1800", "", "dns03.example.com.", "");
+ ASSERT_TRUE(context->getNext(columns));
+ checkRecordRow(columns, "RRSIG", "3600", "NS",
+ "NS 5 2 3600 20100322084538 20100220084538 "
+ "33495 example.com. FAKEFAKEFAKEFAKE", "");
+ ASSERT_TRUE(context->getNext(columns));
+ checkRecordRow(columns, "MX", "3600", "", "10 mail.example.com.", "");
+ ASSERT_TRUE(context->getNext(columns));
+ checkRecordRow(columns, "MX", "3600", "",
+ "20 mail.subzone.example.com.", "");
+ ASSERT_TRUE(context->getNext(columns));
+ checkRecordRow(columns, "RRSIG", "3600", "MX",
+ "MX 5 2 3600 20100322084538 20100220084538 "
+ "33495 example.com. FAKEFAKEFAKEFAKE", "");
+ ASSERT_TRUE(context->getNext(columns));
+ checkRecordRow(columns, "NSEC", "7200", "",
+ "cname-ext.example.com. NS SOA MX RRSIG NSEC DNSKEY", "");
+ ASSERT_TRUE(context->getNext(columns));
+ checkRecordRow(columns, "RRSIG", "7200", "NSEC",
+ "NSEC 5 2 7200 20100322084538 20100220084538 "
+ "33495 example.com. FAKEFAKEFAKEFAKE", "");
+ ASSERT_TRUE(context->getNext(columns));
+ checkRecordRow(columns, "DNSKEY", "3600", "",
+ "256 3 5 AwEAAcOUBllYc1hf7ND9uDy+Yz1BF3sI0m4q NGV7W"
+ "cTD0WEiuV7IjXgHE36fCmS9QsUxSSOV o1I/FMxI2PJVqTYHkX"
+ "FBS7AzLGsQYMU7UjBZ SotBJ6Imt5pXMu+lEDNy8TOUzG3xm7g"
+ "0qcbW YF6qCEfvZoBtAqi5Rk7Mlrqs8agxYyMx", "");
+ ASSERT_TRUE(context->getNext(columns));
+ checkRecordRow(columns, "DNSKEY", "3600", "",
+ "257 3 5 AwEAAe5WFbxdCPq2jZrZhlMj7oJdff3W7syJ tbvzg"
+ "62tRx0gkoCDoBI9DPjlOQG0UAbj+xUV 4HQZJStJaZ+fHU5AwV"
+ "NT+bBZdtV+NujSikhd THb4FYLg2b3Cx9NyJvAVukHp/91HnWu"
+ "G4T36 CzAFrfPwsHIrBz9BsaIQ21VRkcmj7DswfI/i DGd8j6b"
+ "qiODyNZYQ+ZrLmF0KIJ2yPN3iO6Zq 23TaOrVTjB7d1a/h31OD"
+ "fiHAxFHrkY3t3D5J R9Nsl/7fdRmSznwtcSDgLXBoFEYmw6p86"
+ "Acv RyoYNcL1SXjaKVLG5jyU3UR+LcGZT5t/0xGf oIK/aKwEN"
+ "rsjcKZZj660b1M=", "");
+ ASSERT_TRUE(context->getNext(columns));
+ checkRecordRow(columns, "RRSIG", "3600", "DNSKEY",
+ "DNSKEY 5 2 3600 20100322084538 20100220084538 "
+ "4456 example.com. FAKEFAKEFAKEFAKE", "");
+ ASSERT_TRUE(context->getNext(columns));
+ checkRecordRow(columns, "RRSIG", "3600", "DNSKEY",
+ "DNSKEY 5 2 3600 20100322084538 20100220084538 "
+ "33495 example.com. FAKEFAKEFAKEFAKE", "");
+ EXPECT_FALSE(context->getNext(columns));
+ // getNext() returning false should mean the array is not altered
+ checkRecordRow(columns, "RRSIG", "3600", "DNSKEY",
+ "DNSKEY 5 2 3600 20100322084538 20100220084538 "
+ "33495 example.com. FAKEFAKEFAKEFAKE", "");
+
+ // check that another getNext does not cause problems
+ EXPECT_FALSE(context->getNext(columns));
+}
+
+} // end anonymous namespace
diff --git a/src/lib/datasrc/tests/static_unittest.cc b/src/lib/datasrc/tests/static_unittest.cc
index a11e889..4c9fe42 100644
--- a/src/lib/datasrc/tests/static_unittest.cc
+++ b/src/lib/datasrc/tests/static_unittest.cc
@@ -53,6 +53,7 @@ protected:
// NOTE: in addition, the order of the following items matter.
authors_data.push_back("Chen Zhengzhang");
+ authors_data.push_back("Dmitriy Volodin");
authors_data.push_back("Evan Hunt");
authors_data.push_back("Haidong Wang");
authors_data.push_back("Han Feng");
diff --git a/src/lib/datasrc/zone.h b/src/lib/datasrc/zone.h
index 69785f0..0dacc5d 100644
--- a/src/lib/datasrc/zone.h
+++ b/src/lib/datasrc/zone.h
@@ -131,10 +131,10 @@ public:
/// These methods should never throw an exception.
//@{
/// Return the origin name of the zone.
- virtual const isc::dns::Name& getOrigin() const = 0;
+ virtual isc::dns::Name getOrigin() const = 0;
/// Return the RR class of the zone.
- virtual const isc::dns::RRClass& getClass() const = 0;
+ virtual isc::dns::RRClass getClass() const = 0;
//@}
///
@@ -197,7 +197,7 @@ public:
const isc::dns::RRType& type,
isc::dns::RRsetList* target = NULL,
const FindOptions options
- = FIND_DEFAULT) const = 0;
+ = FIND_DEFAULT) = 0;
//@}
};
diff --git a/src/lib/dns/Makefile.am b/src/lib/dns/Makefile.am
index 4a0173c..3cd532f 100644
--- a/src/lib/dns/Makefile.am
+++ b/src/lib/dns/Makefile.am
@@ -31,6 +31,8 @@ EXTRA_DIST += rdata/generic/ds_43.cc
EXTRA_DIST += rdata/generic/ds_43.h
EXTRA_DIST += rdata/generic/mx_15.cc
EXTRA_DIST += rdata/generic/mx_15.h
+EXTRA_DIST += rdata/generic/naptr_35.cc
+EXTRA_DIST += rdata/generic/naptr_35.h
EXTRA_DIST += rdata/generic/ns_2.cc
EXTRA_DIST += rdata/generic/ns_2.h
EXTRA_DIST += rdata/generic/nsec3_50.cc
@@ -51,12 +53,18 @@ EXTRA_DIST += rdata/generic/soa_6.cc
EXTRA_DIST += rdata/generic/soa_6.h
EXTRA_DIST += rdata/generic/txt_16.cc
EXTRA_DIST += rdata/generic/txt_16.h
+EXTRA_DIST += rdata/generic/minfo_14.cc
+EXTRA_DIST += rdata/generic/minfo_14.h
+EXTRA_DIST += rdata/generic/afsdb_18.cc
+EXTRA_DIST += rdata/generic/afsdb_18.h
EXTRA_DIST += rdata/hs_4/a_1.cc
EXTRA_DIST += rdata/hs_4/a_1.h
EXTRA_DIST += rdata/in_1/a_1.cc
EXTRA_DIST += rdata/in_1/a_1.h
EXTRA_DIST += rdata/in_1/aaaa_28.cc
EXTRA_DIST += rdata/in_1/aaaa_28.h
+EXTRA_DIST += rdata/in_1/dhcid_49.cc
+EXTRA_DIST += rdata/in_1/dhcid_49.h
EXTRA_DIST += rdata/in_1/srv_33.cc
EXTRA_DIST += rdata/in_1/srv_33.h
#EXTRA_DIST += rdata/template.cc
diff --git a/src/lib/dns/rdata/generic/afsdb_18.cc b/src/lib/dns/rdata/generic/afsdb_18.cc
new file mode 100644
index 0000000..dd7fa5f
--- /dev/null
+++ b/src/lib/dns/rdata/generic/afsdb_18.cc
@@ -0,0 +1,170 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <string>
+#include <sstream>
+
+#include <util/buffer.h>
+#include <util/strutil.h>
+
+#include <dns/name.h>
+#include <dns/messagerenderer.h>
+#include <dns/rdata.h>
+#include <dns/rdataclass.h>
+
+#include <boost/lexical_cast.hpp>
+
+using namespace std;
+using namespace isc::util::str;
+
+// BEGIN_ISC_NAMESPACE
+// BEGIN_RDATA_NAMESPACE
+
+/// \brief Constructor from string.
+///
+/// \c afsdb_str must be formatted as follows:
+/// \code <subtype> <server name>
+/// \endcode
+/// where the server name field must represent a valid domain name.
+///
+/// An example of a valid string is:
+/// \code "1 server.example.com." \endcode
+///
+/// <b>Exceptions</b>
+///
+/// \exception InvalidRdataText The number of RDATA fields (must be 2) is
+/// incorrect.
+/// \exception std::bad_alloc Memory allocation fails.
+/// \exception Other The constructor of the \c Name class will throw if the
+/// server name in the string is invalid.
+AFSDB::AFSDB(const std::string& afsdb_str) :
+ subtype_(0), server_(Name::ROOT_NAME())
+{
+ istringstream iss(afsdb_str);
+
+ try {
+ const uint32_t subtype = tokenToNum<int32_t, 16>(getToken(iss));
+ const Name servername(getToken(iss));
+ string server;
+
+ if (!iss.eof()) {
+ isc_throw(InvalidRdataText, "Unexpected input for AFSDB "
+ "RDATA: " << afsdb_str);
+ }
+
+ subtype_ = subtype;
+ server_ = servername;
+
+ } catch (const StringTokenError& ste) {
+ isc_throw(InvalidRdataText, "Invalid AFSDB text: " <<
+ ste.what() << ": " << afsdb_str);
+ }
+}
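
[Editor's note, not part of the patch] A usage sketch of the constructor above: parse the documented "<subtype> <server name>" text form and read the fields back through the accessors defined later in this file. The example server name is made up for the illustration.

    #include <dns/rdataclass.h>
    #include <dns/name.h>
    #include <iostream>

    void
    afsdbExample() {
        const isc::dns::rdata::generic::AFSDB afsdb("1 afsdb.example.com.");
        std::cout << afsdb.getSubtype() << " "            // prints: 1
                  << afsdb.getServer().toText() << " "    // afsdb.example.com.
                  << afsdb.toText() << std::endl;         // round-trips the text form
    }
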
+
+/// \brief Constructor from wire-format data.
+///
+/// This constructor doesn't check the validity of the second parameter (rdata
+/// length) for parsing.
+/// If necessary, the caller will check consistency.
+///
+/// \exception std::bad_alloc Memory allocation fails.
+/// \exception Other The constructor of the \c Name class will throw if the
+/// name in the wire data is invalid.
+AFSDB::AFSDB(InputBuffer& buffer, size_t) :
+ subtype_(buffer.readUint16()), server_(buffer)
+{}
+
+/// \brief Copy constructor.
+///
+/// \exception std::bad_alloc Memory allocation fails in copying internal
+/// member variables (this should be very rare).
+AFSDB::AFSDB(const AFSDB& other) :
+ Rdata(), subtype_(other.subtype_), server_(other.server_)
+{}
+
+AFSDB&
+AFSDB::operator=(const AFSDB& source) {
+ subtype_ = source.subtype_;
+ server_ = source.server_;
+
+ return (*this);
+}
+
+/// \brief Convert the \c AFSDB to a string.
+///
+/// The output of this method is formatted as described in the "from string"
+/// constructor (\c AFSDB(const std::string&)).
+///
+/// \exception std::bad_alloc Internal resource allocation fails.
+///
+/// \return A \c string object that represents the \c AFSDB object.
+string
+AFSDB::toText() const {
+ return (boost::lexical_cast<string>(subtype_) + " " + server_.toText());
+}
+
+/// \brief Render the \c AFSDB in the wire format without name compression.
+///
+/// \exception std::bad_alloc Internal resource allocation fails.
+///
+/// \param buffer An output buffer to store the wire data.
+void
+AFSDB::toWire(OutputBuffer& buffer) const {
+ buffer.writeUint16(subtype_);
+ server_.toWire(buffer);
+}
+
+/// \brief Render the \c AFSDB in the wire format, taking name compression
+/// into account.
+///
+/// As specified in RFC3597, TYPE AFSDB is not "well-known", so the server
+/// field (domain name) will not be compressed.
+///
+/// \exception std::bad_alloc Internal resource allocation fails.
+///
+/// \param renderer DNS message rendering context that encapsulates the
+/// output buffer and name compression information.
+void
+AFSDB::toWire(AbstractMessageRenderer& renderer) const {
+ renderer.writeUint16(subtype_);
+ renderer.writeName(server_, false);
+}
+
+/// \brief Compare two instances of \c AFSDB RDATA.
+///
+/// See documentation in \c Rdata.
+int
+AFSDB::compare(const Rdata& other) const {
+ const AFSDB& other_afsdb = dynamic_cast<const AFSDB&>(other);
+ if (subtype_ < other_afsdb.subtype_) {
+ return (-1);
+ } else if (subtype_ > other_afsdb.subtype_) {
+ return (1);
+ }
+
+ return (compareNames(server_, other_afsdb.server_));
+}
+
+const Name&
+AFSDB::getServer() const {
+ return (server_);
+}
+
+uint16_t
+AFSDB::getSubtype() const {
+ return (subtype_);
+}
+
+// END_RDATA_NAMESPACE
+// END_ISC_NAMESPACE
diff --git a/src/lib/dns/rdata/generic/afsdb_18.h b/src/lib/dns/rdata/generic/afsdb_18.h
new file mode 100644
index 0000000..4a46775
--- /dev/null
+++ b/src/lib/dns/rdata/generic/afsdb_18.h
@@ -0,0 +1,74 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+// BEGIN_HEADER_GUARD
+
+#include <stdint.h>
+
+#include <string>
+
+#include <dns/name.h>
+#include <dns/rdata.h>
+
+// BEGIN_ISC_NAMESPACE
+
+// BEGIN_COMMON_DECLARATIONS
+// END_COMMON_DECLARATIONS
+
+// BEGIN_RDATA_NAMESPACE
+
+/// \brief \c rdata::AFSDB class represents the AFSDB RDATA as defined %in
+/// RFC1183.
+///
+/// This class implements the basic interfaces inherited from the abstract
+/// \c rdata::Rdata class, and provides trivial accessors specific to the
+/// AFSDB RDATA.
+class AFSDB : public Rdata {
+public:
+ // BEGIN_COMMON_MEMBERS
+ // END_COMMON_MEMBERS
+
+ /// \brief Assignment operator.
+ ///
+ /// This method never throws an exception.
+ AFSDB& operator=(const AFSDB& source);
+ ///
+ /// Specialized methods
+ ///
+
+ /// \brief Return the value of the server field.
+ ///
+ /// \return A reference to a \c Name class object corresponding to the
+ /// internal server name.
+ ///
+ /// This method never throws an exception.
+ const Name& getServer() const;
+
+ /// \brief Return the value of the subtype field.
+ ///
+ /// This method never throws an exception.
+ uint16_t getSubtype() const;
+
+private:
+ uint16_t subtype_;
+ Name server_;
+};
+
+// END_RDATA_NAMESPACE
+// END_ISC_NAMESPACE
+// END_HEADER_GUARD
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/dns/rdata/generic/minfo_14.cc b/src/lib/dns/rdata/generic/minfo_14.cc
new file mode 100644
index 0000000..734fbc3
--- /dev/null
+++ b/src/lib/dns/rdata/generic/minfo_14.cc
@@ -0,0 +1,155 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <string>
+#include <sstream>
+
+#include <util/buffer.h>
+
+#include <dns/messagerenderer.h>
+#include <dns/name.h>
+#include <dns/rdata.h>
+#include <dns/rdataclass.h>
+
+using namespace std;
+using namespace isc::dns;
+
+// BEGIN_ISC_NAMESPACE
+// BEGIN_RDATA_NAMESPACE
+
+/// \brief Constructor from string.
+///
+/// \c minfo_str must be formatted as follows:
+/// \code <rmailbox name> <emailbox name>
+/// \endcode
+/// where both fields must represent a valid domain name.
+///
+/// An example of a valid string is:
+/// \code "rmail.example.com. email.example.com." \endcode
+///
+/// <b>Exceptions</b>
+///
+/// \exception InvalidRdataText The number of RDATA fields (must be 2) is
+/// incorrect.
+/// \exception std::bad_alloc Memory allocation for names fails.
+/// \exception Other The constructor of the \c Name class will throw if the
+/// names in the string are invalid.
+MINFO::MINFO(const std::string& minfo_str) :
+ // We cannot construct both names in the initialization list due to the
+ // necessary text processing, so we have to initialize them with a dummy
+ // name and replace them later.
+ rmailbox_(Name::ROOT_NAME()), emailbox_(Name::ROOT_NAME())
+{
+ istringstream iss(minfo_str);
+ string rmailbox_str, emailbox_str;
+ iss >> rmailbox_str >> emailbox_str;
+
+ // Validation: A valid MINFO RR must have exactly two fields.
+ if (iss.bad() || iss.fail()) {
+ isc_throw(InvalidRdataText, "Invalid MINFO text: " << minfo_str);
+ }
+ if (!iss.eof()) {
+ isc_throw(InvalidRdataText, "Invalid MINFO text (redundant field): "
+ << minfo_str);
+ }
+
+ rmailbox_ = Name(rmailbox_str);
+ emailbox_ = Name(emailbox_str);
+}
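
[Editor's note, not part of the patch] A usage sketch of the two-name text form accepted by the constructor above, together with the by-value accessors declared in minfo_14.h; the mailbox names are made up for the illustration.

    #include <dns/rdataclass.h>
    #include <dns/name.h>
    #include <iostream>

    void
    minfoExample() {
        const isc::dns::rdata::generic::MINFO minfo(
            "rmail.example.com. email.example.com.");
        std::cout << minfo.getRmailbox().toText() << " "   // rmail.example.com.
                  << minfo.getEmailbox().toText()          // email.example.com.
                  << std::endl;
    }
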
+
+/// \brief Constructor from wire-format data.
+///
+/// This constructor doesn't check the validity of the second parameter (rdata
+/// length) for parsing.
+/// If necessary, the caller will check consistency.
+///
+/// \exception std::bad_alloc Memory allocation for names fails.
+/// \exception Other The constructor of the \c Name class will throw if the
+/// names in the wire data are invalid.
+MINFO::MINFO(InputBuffer& buffer, size_t) :
+ rmailbox_(buffer), emailbox_(buffer)
+{}
+
+/// \brief Copy constructor.
+///
+/// \exception std::bad_alloc Memory allocation fails in copying internal
+/// member variables (this should be very rare).
+MINFO::MINFO(const MINFO& other) :
+ Rdata(), rmailbox_(other.rmailbox_), emailbox_(other.emailbox_)
+{}
+
+/// \brief Convert the \c MINFO to a string.
+///
+/// The output of this method is formatted as described in the "from string"
+/// constructor (\c MINFO(const std::string&)).
+///
+/// \exception std::bad_alloc Internal resource allocation fails.
+///
+/// \return A \c string object that represents the \c MINFO object.
+std::string
+MINFO::toText() const {
+ return (rmailbox_.toText() + " " + emailbox_.toText());
+}
+
+/// \brief Render the \c MINFO in the wire format without name compression.
+///
+/// \exception std::bad_alloc Internal resource allocation fails.
+///
+/// \param buffer An output buffer to store the wire data.
+void
+MINFO::toWire(OutputBuffer& buffer) const {
+ rmailbox_.toWire(buffer);
+ emailbox_.toWire(buffer);
+}
+
+MINFO&
+MINFO::operator=(const MINFO& source) {
+ rmailbox_ = source.rmailbox_;
+ emailbox_ = source.emailbox_;
+
+ return (*this);
+}
+
+/// \brief Render the \c MINFO in the wire format, taking name compression
+/// into account.
+///
+/// Since TYPE MINFO is "well-known" (RFC3597's no-compression rule does not
+/// apply), the rmailbox and emailbox fields (domain names) will be
+/// compressed.
+///
+/// \exception std::bad_alloc Internal resource allocation fails.
+///
+/// \param renderer DNS message rendering context that encapsulates the
+/// output buffer and name compression information.
+void
+MINFO::toWire(AbstractMessageRenderer& renderer) const {
+ renderer.writeName(rmailbox_);
+ renderer.writeName(emailbox_);
+}
+
+/// \brief Compare two instances of \c MINFO RDATA.
+///
+/// See documentation in \c Rdata.
+int
+MINFO::compare(const Rdata& other) const {
+ const MINFO& other_minfo = dynamic_cast<const MINFO&>(other);
+
+ const int cmp = compareNames(rmailbox_, other_minfo.rmailbox_);
+ if (cmp != 0) {
+ return (cmp);
+ }
+ return (compareNames(emailbox_, other_minfo.emailbox_));
+}
+
+// END_RDATA_NAMESPACE
+// END_ISC_NAMESPACE
diff --git a/src/lib/dns/rdata/generic/minfo_14.h b/src/lib/dns/rdata/generic/minfo_14.h
new file mode 100644
index 0000000..f3ee1d0
--- /dev/null
+++ b/src/lib/dns/rdata/generic/minfo_14.h
@@ -0,0 +1,82 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+// BEGIN_HEADER_GUARD
+
+#include <string>
+
+#include <dns/name.h>
+#include <dns/rdata.h>
+
+// BEGIN_ISC_NAMESPACE
+
+// BEGIN_COMMON_DECLARATIONS
+// END_COMMON_DECLARATIONS
+
+// BEGIN_RDATA_NAMESPACE
+
+/// \brief \c rdata::generic::MINFO class represents the MINFO RDATA as
+/// defined in RFC1035.
+///
+/// This class implements the basic interfaces inherited from the abstract
+/// \c rdata::Rdata class, and provides trivial accessors specific to the
+/// MINFO RDATA.
+class MINFO : public Rdata {
+public:
+ // BEGIN_COMMON_MEMBERS
+ // END_COMMON_MEMBERS
+
+ /// \brief Define the assignment operator.
+ ///
+ /// \exception std::bad_alloc Memory allocation fails in copying
+ /// internal member variables (this should be very rare).
+ MINFO& operator=(const MINFO& source);
+
+ /// \brief Return the value of the rmailbox field.
+ ///
+ /// \exception std::bad_alloc If resource allocation for the returned
+ /// \c Name fails.
+ ///
+ /// \note
+ /// Unlike the case of some other RDATA classes (such as
+ /// \c NS::getNSName()), this method constructs a new \c Name object
+ /// and returns it, instead of returning a reference to a \c Name object
+ /// internally maintained in the class (which is a private member).
+ /// This is based on the observation that this method will be rarely
+ /// used and even when it's used it will not be in a performance context
+ /// (for example, a recursive resolver won't need this field in its
+ /// resolution process). By returning a new object we have the flexibility
+ /// of changing the internal representation without the risk of changing
+ /// the interface or method properties.
+ /// The same note applies to the \c getEmailbox() method.
+ Name getRmailbox() const { return (rmailbox_); }
+
+ /// \brief Return the value of the emailbox field.
+ ///
+ /// \exception std::bad_alloc If resource allocation for the returned
+ /// \c Name fails.
+ Name getEmailbox() const { return (emailbox_); }
+
+private:
+ Name rmailbox_;
+ Name emailbox_;
+};
+
+// END_RDATA_NAMESPACE
+// END_ISC_NAMESPACE
+// END_HEADER_GUARD
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/dns/rdata/generic/naptr_35.cc b/src/lib/dns/rdata/generic/naptr_35.cc
new file mode 100644
index 0000000..5268331
--- /dev/null
+++ b/src/lib/dns/rdata/generic/naptr_35.cc
@@ -0,0 +1,314 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <config.h>
+
+#include <string>
+
+#include <boost/lexical_cast.hpp>
+
+#include <exceptions/exceptions.h>
+
+#include <dns/name.h>
+#include <dns/messagerenderer.h>
+#include <dns/rdata.h>
+#include <dns/rdataclass.h>
+
+using namespace std;
+using namespace boost;
+using namespace isc::util;
+
+// BEGIN_ISC_NAMESPACE
+// BEGIN_RDATA_NAMESPACE
+
+namespace {
+/// Skip the leading whitespace of the input string
+///
+/// \param input_str The input string
+/// \param input_iterator The iterator from which skipping starts; it is
+/// advanced past the whitespace
+void
+skipLeftSpaces(const std::string& input_str,
+ std::string::const_iterator& input_iterator)
+{
+ if (input_iterator >= input_str.end()) {
+ isc_throw(InvalidRdataText,
+ "Invalid NAPTR text format, field is missing.");
+ }
+
+ if (!isspace(*input_iterator)) {
+ isc_throw(InvalidRdataText,
+ "Invalid NAPTR text format, fields are not separated by space.");
+ }
+ // Skip white spaces
+ while (input_iterator < input_str.end() && isspace(*input_iterator)) {
+ ++input_iterator;
+ }
+}
+
+/// Get a <character-string> from a string
+///
+/// \param input_str The input string
+/// \param input_iterator The iterator from which to start extracting; it
+/// will be updated to the new position after the function returns
+/// \return A std::string that contains the extracted <character-string>
+std::string
+getNextCharacterString(const std::string& input_str,
+ std::string::const_iterator& input_iterator)
+{
+ string result;
+
+ // If the iterator is already at the end of the input, the
+ // <character-string> field is missing, which is invalid
+ if (input_iterator >= input_str.end()) {
+ isc_throw(InvalidRdataText, "Invalid NAPTR text format, "
+ "<character-string> field is missing.");
+ }
+
+ // Whether the <character-string> is enclosed in double quotes (")
+ bool quotes_separated = (*input_iterator == '"');
+
+ if (quotes_separated) {
+ ++input_iterator;
+ }
+
+ while (input_iterator < input_str.end()) {
+ if (quotes_separated) {
+ // If the <character-string> is enclosed in quotes and another quote
+ // character is encountered, it is the end of the
+ // <character-string>
+ if (*input_iterator == '"') {
+ ++input_iterator;
+ break;
+ }
+ } else if (*input_iterator == ' ') {
+ // If the <character-string> is not enclosed in quotes,
+ // it is terminated by a <space> character
+ break;
+ }
+
+ result.push_back(*input_iterator);
+
+ ++input_iterator;
+ }
+
+ if (result.size() > MAX_CHARSTRING_LEN) {
+ isc_throw(CharStringTooLong, "NAPTR <character-string> is too long");
+ }
+
+ return (result);
+}
+
+/// Get a <character-string> from an input buffer
+///
+/// \param buffer The input buffer
+/// \param len The total length of the input buffer
+/// \return A std::string that contains the extracted <character-string>
+std::string
+getNextCharacterString(InputBuffer& buffer, size_t len) {
+ uint8_t str_len = buffer.readUint8();
+
+ size_t pos = buffer.getPosition();
+ if (len - pos < str_len) {
+ isc_throw(InvalidRdataLength, "Invalid NAPTR string length");
+ }
+
+ uint8_t buf[MAX_CHARSTRING_LEN];
+ buffer.readData(buf, str_len);
+ return (string(buf, buf + str_len));
+}
+
+} // Anonymous namespace
+
+NAPTR::NAPTR(InputBuffer& buffer, size_t len):
+ replacement_(".")
+{
+ order_ = buffer.readUint16();
+ preference_ = buffer.readUint16();
+
+ flags_ = getNextCharacterString(buffer, len);
+ services_ = getNextCharacterString(buffer, len);
+ regexp_ = getNextCharacterString(buffer, len);
+ replacement_ = Name(buffer);
+}
+
+NAPTR::NAPTR(const std::string& naptr_str):
+ replacement_(".")
+{
+ istringstream iss(naptr_str);
+ uint16_t order;
+ uint16_t preference;
+
+ iss >> order >> preference;
+
+ if (iss.bad() || iss.fail()) {
+ isc_throw(InvalidRdataText, "Invalid NAPTR text format");
+ }
+
+ order_ = order;
+ preference_ = preference;
+
+ string::const_iterator input_iterator = naptr_str.begin() + iss.tellg();
+
+ skipLeftSpaces(naptr_str, input_iterator);
+
+ flags_ = getNextCharacterString(naptr_str, input_iterator);
+
+ skipLeftSpaces(naptr_str, input_iterator);
+
+ services_ = getNextCharacterString(naptr_str, input_iterator);
+
+ skipLeftSpaces(naptr_str, input_iterator);
+
+ regexp_ = getNextCharacterString(naptr_str, input_iterator);
+
+ skipLeftSpaces(naptr_str, input_iterator);
+
+ if (input_iterator < naptr_str.end()) {
+ string replacementStr(input_iterator, naptr_str.end());
+
+ replacement_ = Name(replacementStr);
+ } else {
+ isc_throw(InvalidRdataText,
+ "Invalid NAPTR text format, replacement field is missing");
+ }
+}
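+// For reference, the presentation format parsed above follows RFC 3403:
+//   <order> <preference> <flags> <services> <regexp> <replacement>
+// e.g. (an arbitrary example, also used in the unit tests):
+//   10 100 "S" "SIP+D2U" "" _sip._udp.example.com.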
+
+NAPTR::NAPTR(const NAPTR& naptr):
+ Rdata(), order_(naptr.order_), preference_(naptr.preference_),
+ flags_(naptr.flags_), services_(naptr.services_), regexp_(naptr.regexp_),
+ replacement_(naptr.replacement_)
+{
+}
+
+void
+NAPTR::toWire(OutputBuffer& buffer) const {
+ buffer.writeUint16(order_);
+ buffer.writeUint16(preference_);
+
+ buffer.writeUint8(flags_.size());
+ buffer.writeData(flags_.c_str(), flags_.size());
+
+ buffer.writeUint8(services_.size());
+ buffer.writeData(services_.c_str(), services_.size());
+
+ buffer.writeUint8(regexp_.size());
+ buffer.writeData(regexp_.c_str(), regexp_.size());
+
+ replacement_.toWire(buffer);
+}
+
+void
+NAPTR::toWire(AbstractMessageRenderer& renderer) const {
+ renderer.writeUint16(order_);
+ renderer.writeUint16(preference_);
+
+ renderer.writeUint8(flags_.size());
+ renderer.writeData(flags_.c_str(), flags_.size());
+
+ renderer.writeUint8(services_.size());
+ renderer.writeData(services_.c_str(), services_.size());
+
+ renderer.writeUint8(regexp_.size());
+ renderer.writeData(regexp_.c_str(), regexp_.size());
+
+ replacement_.toWire(renderer);
+}
+
+string
+NAPTR::toText() const {
+ string result;
+ result += lexical_cast<string>(order_);
+ result += " ";
+ result += lexical_cast<string>(preference_);
+ result += " \"";
+ result += flags_;
+ result += "\" \"";
+ result += services_;
+ result += "\" \"";
+ result += regexp_;
+ result += "\" ";
+ result += replacement_.toText();
+ return (result);
+}
+
+int
+NAPTR::compare(const Rdata& other) const {
+ const NAPTR& other_naptr = dynamic_cast<const NAPTR&>(other);
+
+ if (order_ < other_naptr.order_) {
+ return (-1);
+ } else if (order_ > other_naptr.order_) {
+ return (1);
+ }
+
+ if (preference_ < other_naptr.preference_) {
+ return (-1);
+ } else if (preference_ > other_naptr.preference_) {
+ return (1);
+ }
+
+ if (flags_ < other_naptr.flags_) {
+ return (-1);
+ } else if (flags_ > other_naptr.flags_) {
+ return (1);
+ }
+
+ if (services_ < other_naptr.services_) {
+ return (-1);
+ } else if (services_ > other_naptr.services_) {
+ return (1);
+ }
+
+ if (regexp_ < other_naptr.regexp_) {
+ return (-1);
+ } else if (regexp_ > other_naptr.regexp_) {
+ return (1);
+ }
+
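+ // Finally, fall back to the replacement names; compareNames() performs
+ // the case-insensitive name comparison used for RDATA ordering.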
+ return (compareNames(replacement_, other_naptr.replacement_));
+}
+
+uint16_t
+NAPTR::getOrder() const {
+ return (order_);
+}
+
+uint16_t
+NAPTR::getPreference() const {
+ return (preference_);
+}
+
+const std::string&
+NAPTR::getFlags() const {
+ return (flags_);
+}
+
+const std::string&
+NAPTR::getServices() const {
+ return (services_);
+}
+
+const std::string&
+NAPTR::getRegexp() const {
+ return (regexp_);
+}
+
+const Name&
+NAPTR::getReplacement() const {
+ return (replacement_);
+}
+
+// END_RDATA_NAMESPACE
+// END_ISC_NAMESPACE
diff --git a/src/lib/dns/rdata/generic/naptr_35.h b/src/lib/dns/rdata/generic/naptr_35.h
new file mode 100644
index 0000000..b3015ae
--- /dev/null
+++ b/src/lib/dns/rdata/generic/naptr_35.h
@@ -0,0 +1,63 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+// BEGIN_HEADER_GUARD
+
+#include <stdint.h>
+
+#include <string>
+
+#include <dns/name.h>
+#include <dns/rdata.h>
+#include <util/buffer.h>
+
+// BEGIN_ISC_NAMESPACE
+
+// BEGIN_COMMON_DECLARATIONS
+// END_COMMON_DECLARATIONS
+
+// BEGIN_RDATA_NAMESPACE
+
+/// \brief \c NAPTR class represents the NAPTR rdata defined in
+/// RFC2915, RFC2168 and RFC3403
+///
+/// This class implements the basic interfaces inherited from the
+/// \c rdata::Rdata class, and provides accessors specific to the
+/// NAPTR rdata.
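+///
+/// A minimal illustrative sketch (the record content is an arbitrary
+/// example, not normative):
+/// \code
+///     const NAPTR naptr("10 100 \"S\" \"SIP+D2U\" \"\" _sip._udp.example.com.");
+///     const uint16_t order = naptr.getOrder();          // 10
+///     const Name& replacement = naptr.getReplacement(); // _sip._udp.example.com.
+/// \endcode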
+class NAPTR : public Rdata {
+public:
+ // BEGIN_COMMON_MEMBERS
+ // END_COMMON_MEMBERS
+
+ // NAPTR specific methods
+ uint16_t getOrder() const;
+ uint16_t getPreference() const;
+ const std::string& getFlags() const;
+ const std::string& getServices() const;
+ const std::string& getRegexp() const;
+ const Name& getReplacement() const;
+private:
+ uint16_t order_;
+ uint16_t preference_;
+ std::string flags_;
+ std::string services_;
+ std::string regexp_;
+ Name replacement_;
+};
+
+// END_RDATA_NAMESPACE
+// END_ISC_NAMESPACE
+// END_HEADER_GUARD
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/dns/rdata/generic/rrsig_46.cc b/src/lib/dns/rdata/generic/rrsig_46.cc
index 0c82406..fc8e340 100644
--- a/src/lib/dns/rdata/generic/rrsig_46.cc
+++ b/src/lib/dns/rdata/generic/rrsig_46.cc
@@ -243,5 +243,10 @@ RRSIG::compare(const Rdata& other) const {
}
}
+const RRType&
+RRSIG::typeCovered() const {
+ return (impl_->covered_);
+}
+
// END_RDATA_NAMESPACE
// END_ISC_NAMESPACE
diff --git a/src/lib/dns/rdata/generic/rrsig_46.h b/src/lib/dns/rdata/generic/rrsig_46.h
index 19acc40..b8e6306 100644
--- a/src/lib/dns/rdata/generic/rrsig_46.h
+++ b/src/lib/dns/rdata/generic/rrsig_46.h
@@ -38,6 +38,9 @@ public:
// END_COMMON_MEMBERS
RRSIG& operator=(const RRSIG& source);
~RRSIG();
+
+ // specialized methods
+ const RRType& typeCovered() const;
private:
RRSIGImpl* impl_;
};
diff --git a/src/lib/dns/rdata/in_1/dhcid_49.cc b/src/lib/dns/rdata/in_1/dhcid_49.cc
new file mode 100644
index 0000000..0a9a23c
--- /dev/null
+++ b/src/lib/dns/rdata/in_1/dhcid_49.cc
@@ -0,0 +1,145 @@
+// Copyright (C) 2010 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <stdint.h>
+#include <string.h>
+
+#include <string>
+
+#include <exceptions/exceptions.h>
+
+#include <util/buffer.h>
+#include <util/encode/base64.h>
+#include <dns/exceptions.h>
+#include <dns/messagerenderer.h>
+#include <dns/rdata.h>
+#include <dns/rdataclass.h>
+
+using namespace std;
+using namespace isc::util;
+
+// BEGIN_ISC_NAMESPACE
+// BEGIN_RDATA_NAMESPACE
+
+/// \brief Constructor from string.
+///
+/// \param dhcid_str A base-64 representation of the DHCID binary data.
+/// The data is considered to be opaque, but a sanity check is performed.
+///
+/// <b>Exceptions</b>
+///
+/// \c dhcid_str must be a valid BASE-64 string, otherwise an exception
+/// of class \c isc::BadValue will be thrown;
+/// the binary data should consist of at least 3 octets as per RFC4701:
+/// < 2 octets > Identifier type code
+/// < 1 octet > Digest type code
+/// < n octets > Digest (length depends on digest type)
+/// If the data is less than 3 octets (i.e. it cannot contain id type code and
+/// digest type code), an exception of class \c InvalidRdataLength is thrown.
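+///
+/// A minimal illustrative sketch (the base-64 value below is an arbitrary
+/// example, not data for any real host):
+/// \code
+///     const DHCID rdata("AAABxLmlskllE0MVjd57zHcWmEH3");
+///     const std::vector<uint8_t>& data = rdata.getDigest();
+/// \endcode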
+DHCID::DHCID(const string& dhcid_str) {
+ istringstream iss(dhcid_str);
+ stringbuf digestbuf;
+
+ iss >> &digestbuf;
+ isc::util::encode::decodeBase64(digestbuf.str(), digest_);
+
+ // RFC4701 states DNS software should consider the RDATA section to
+ // be opaque, but there must be at least three bytes in the data:
+ // < 2 octets > Identifier type code
+ // < 1 octet > Digest type code
+ if (digest_.size() < 3) {
+ isc_throw(InvalidRdataLength, "DHCID length " << digest_.size() <<
+ " too short, need at least 3 bytes");
+ }
+}
+
+/// \brief Constructor from wire-format data.
+///
+/// \param buffer A buffer storing the wire format data.
+/// \param rdata_len The length of the RDATA in bytes
+///
+/// <b>Exceptions</b>
+/// \c InvalidRdataLength is thrown if \c rdata_len is less than the minimum of 3 octets
+DHCID::DHCID(InputBuffer& buffer, size_t rdata_len) {
+ if (rdata_len < 3) {
+ isc_throw(InvalidRdataLength, "DHCID length " << rdata_len <<
+ " too short, need at least 3 bytes");
+ }
+
+ digest_.resize(rdata_len);
+ buffer.readData(&digest_[0], rdata_len);
+}
+
+/// \brief The copy constructor.
+///
+/// This trivial copy constructor never throws an exception.
+DHCID::DHCID(const DHCID& other) : Rdata(), digest_(other.digest_)
+{}
+
+/// \brief Render the \c DHCID in the wire format.
+///
+/// \param buffer An output buffer to store the wire data.
+void
+DHCID::toWire(OutputBuffer& buffer) const {
+ buffer.writeData(&digest_[0], digest_.size());
+}
+
+/// \brief Render the \c DHCID in the wire format into a
+/// \c MessageRenderer object.
+///
+/// \param renderer DNS message rendering context that encapsulates the
+/// output buffer in which the \c DHCID is to be stored.
+void
+DHCID::toWire(AbstractMessageRenderer& renderer) const {
+ renderer.writeData(&digest_[0], digest_.size());
+}
+
+/// \brief Convert the \c DHCID to a string.
+///
+/// This method returns a \c std::string object representing the \c DHCID.
+///
+/// \return A string representation of \c DHCID.
+string
+DHCID::toText() const {
+ return (isc::util::encode::encodeBase64(digest_));
+}
+
+/// \brief Compare two instances of \c DHCID RDATA.
+///
+/// See documentation in \c Rdata.
+int
+DHCID::compare(const Rdata& other) const {
+ const DHCID& other_dhcid = dynamic_cast<const DHCID&>(other);
+
+ size_t this_len = digest_.size();
+ size_t other_len = other_dhcid.digest_.size();
+ size_t cmplen = min(this_len, other_len);
+ int cmp = memcmp(&digest_[0], &other_dhcid.digest_[0], cmplen);
+ if (cmp != 0) {
+ return (cmp);
+ } else {
+ return ((this_len == other_len) ? 0 : (this_len < other_len) ? -1 : 1);
+ }
+}
+
+/// \brief Accessor method to get the DHCID digest
+///
+/// \return A reference to the binary DHCID data
+const std::vector<uint8_t>&
+DHCID::getDigest() const {
+ return (digest_);
+}
+
+// END_RDATA_NAMESPACE
+// END_ISC_NAMESPACE
diff --git a/src/lib/dns/rdata/in_1/dhcid_49.h b/src/lib/dns/rdata/in_1/dhcid_49.h
new file mode 100644
index 0000000..919395f
--- /dev/null
+++ b/src/lib/dns/rdata/in_1/dhcid_49.h
@@ -0,0 +1,58 @@
+// Copyright (C) 2010 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+// BEGIN_HEADER_GUARD
+
+#include <stdint.h>
+
+#include <string>
+#include <vector>
+
+#include <dns/rdata.h>
+
+// BEGIN_ISC_NAMESPACE
+
+// BEGIN_COMMON_DECLARATIONS
+// END_COMMON_DECLARATIONS
+
+// BEGIN_RDATA_NAMESPACE
+
+/// \brief \c rdata::DHCID class represents the DHCID RDATA as defined %in
+/// RFC4701.
+///
+/// This class implements the basic interfaces inherited from the abstract
+/// \c rdata::Rdata class, and provides trivial accessors specific to the
+/// DHCID RDATA.
+class DHCID : public Rdata {
+public:
+ // BEGIN_COMMON_MEMBERS
+ // END_COMMON_MEMBERS
+
+ /// \brief Return the digest.
+ ///
+ /// This method never throws an exception.
+ const std::vector<uint8_t>& getDigest() const;
+
+private:
+ /// \brief Private data representation
+ ///
+ /// Opaque data at least 3 octets long as per RFC4701.
+ ///
+ std::vector<uint8_t> digest_;
+};
+// END_RDATA_NAMESPACE
+// END_ISC_NAMESPACE
+// END_HEADER_GUARD
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/dns/rdata/in_1/srv_33.h b/src/lib/dns/rdata/in_1/srv_33.h
index d067021..32b7dc0 100644
--- a/src/lib/dns/rdata/in_1/srv_33.h
+++ b/src/lib/dns/rdata/in_1/srv_33.h
@@ -12,13 +12,13 @@
// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
// PERFORMANCE OF THIS SOFTWARE.
+// BEGIN_HEADER_GUARD
+
#include <stdint.h>
#include <dns/name.h>
#include <dns/rdata.h>
-// BEGIN_HEADER_GUARD
-
// BEGIN_ISC_NAMESPACE
// BEGIN_COMMON_DECLARATIONS
diff --git a/src/lib/dns/tests/Makefile.am b/src/lib/dns/tests/Makefile.am
index bd6fbe2..48bce85 100644
--- a/src/lib/dns/tests/Makefile.am
+++ b/src/lib/dns/tests/Makefile.am
@@ -32,6 +32,7 @@ run_unittests_SOURCES += rdata_ns_unittest.cc rdata_soa_unittest.cc
run_unittests_SOURCES += rdata_txt_unittest.cc rdata_mx_unittest.cc
run_unittests_SOURCES += rdata_ptr_unittest.cc rdata_cname_unittest.cc
run_unittests_SOURCES += rdata_dname_unittest.cc
+run_unittests_SOURCES += rdata_afsdb_unittest.cc
run_unittests_SOURCES += rdata_opt_unittest.cc
run_unittests_SOURCES += rdata_dnskey_unittest.cc
run_unittests_SOURCES += rdata_ds_unittest.cc
@@ -42,7 +43,9 @@ run_unittests_SOURCES += rdata_nsec3param_unittest.cc
run_unittests_SOURCES += rdata_rrsig_unittest.cc
run_unittests_SOURCES += rdata_rp_unittest.cc
run_unittests_SOURCES += rdata_srv_unittest.cc
+run_unittests_SOURCES += rdata_minfo_unittest.cc
run_unittests_SOURCES += rdata_tsig_unittest.cc
+run_unittests_SOURCES += rdata_naptr_unittest.cc
run_unittests_SOURCES += rrset_unittest.cc rrsetlist_unittest.cc
run_unittests_SOURCES += question_unittest.cc
run_unittests_SOURCES += rrparamregistry_unittest.cc
diff --git a/src/lib/dns/tests/rdata_afsdb_unittest.cc b/src/lib/dns/tests/rdata_afsdb_unittest.cc
new file mode 100644
index 0000000..7df8d83
--- /dev/null
+++ b/src/lib/dns/tests/rdata_afsdb_unittest.cc
@@ -0,0 +1,210 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <util/buffer.h>
+#include <dns/exceptions.h>
+#include <dns/messagerenderer.h>
+#include <dns/rdata.h>
+#include <dns/rdataclass.h>
+#include <dns/rrclass.h>
+#include <dns/rrtype.h>
+
+#include <gtest/gtest.h>
+
+#include <dns/tests/unittest_util.h>
+#include <dns/tests/rdata_unittest.h>
+
+using isc::UnitTestUtil;
+using namespace std;
+using namespace isc::dns;
+using namespace isc::util;
+using namespace isc::dns::rdata;
+
+const char* const afsdb_text = "1 afsdb.example.com.";
+const char* const afsdb_text2 = "0 root.example.com.";
+const char* const too_long_label("012345678901234567890123456789"
+ "0123456789012345678901234567890123");
+
+namespace {
+class Rdata_AFSDB_Test : public RdataTest {
+protected:
+ Rdata_AFSDB_Test() :
+ rdata_afsdb(string(afsdb_text)), rdata_afsdb2(string(afsdb_text2))
+ {}
+
+ const generic::AFSDB rdata_afsdb;
+ const generic::AFSDB rdata_afsdb2;
+ vector<uint8_t> expected_wire;
+};
+
+
+TEST_F(Rdata_AFSDB_Test, createFromText) {
+ EXPECT_EQ(1, rdata_afsdb.getSubtype());
+ EXPECT_EQ(Name("afsdb.example.com."), rdata_afsdb.getServer());
+
+ EXPECT_EQ(0, rdata_afsdb2.getSubtype());
+ EXPECT_EQ(Name("root.example.com."), rdata_afsdb2.getServer());
+}
+
+TEST_F(Rdata_AFSDB_Test, badText) {
+ // subtype is too large
+ EXPECT_THROW(const generic::AFSDB rdata_afsdb("99999999 afsdb.example.com."),
+ InvalidRdataText);
+ // invalid or incomplete text
+ EXPECT_THROW(const generic::AFSDB rdata_afsdb("10"), InvalidRdataText);
+ EXPECT_THROW(const generic::AFSDB rdata_afsdb("SPOON"), InvalidRdataText);
+ EXPECT_THROW(const generic::AFSDB rdata_afsdb("1root.example.com."), InvalidRdataText);
+ // number of fields (must be 2) is incorrect
+ EXPECT_THROW(const generic::AFSDB rdata_afsdb("10 afsdb. example.com."),
+ InvalidRdataText);
+ // bad name
+ EXPECT_THROW(const generic::AFSDB rdata_afsdb("1 afsdb.example.com." +
+ string(too_long_label)), TooLongLabel);
+}
+
+TEST_F(Rdata_AFSDB_Test, assignment) {
+ generic::AFSDB copy((string(afsdb_text2)));
+ copy = rdata_afsdb;
+ EXPECT_EQ(0, copy.compare(rdata_afsdb));
+
+ // Check if the copied data is valid even after the original is deleted
+ generic::AFSDB* copy2 = new generic::AFSDB(rdata_afsdb);
+ generic::AFSDB copy3((string(afsdb_text2)));
+ copy3 = *copy2;
+ delete copy2;
+ EXPECT_EQ(0, copy3.compare(rdata_afsdb));
+
+ // Self assignment
+ copy = copy;
+ EXPECT_EQ(0, copy.compare(rdata_afsdb));
+}
+
+TEST_F(Rdata_AFSDB_Test, createFromWire) {
+ // uncompressed names
+ EXPECT_EQ(0, rdata_afsdb.compare(
+ *rdataFactoryFromFile(RRType::AFSDB(), RRClass::IN(),
+ "rdata_afsdb_fromWire1.wire")));
+ // compressed name
+ EXPECT_EQ(0, rdata_afsdb.compare(
+ *rdataFactoryFromFile(RRType::AFSDB(), RRClass::IN(),
+ "rdata_afsdb_fromWire2.wire", 13)));
+ // RDLENGTH is too short
+ EXPECT_THROW(rdataFactoryFromFile(RRType::AFSDB(), RRClass::IN(),
+ "rdata_afsdb_fromWire3.wire"),
+ InvalidRdataLength);
+ // RDLENGTH is too long
+ EXPECT_THROW(rdataFactoryFromFile(RRType::AFSDB(), RRClass::IN(),
+ "rdata_afsdb_fromWire4.wire"),
+ InvalidRdataLength);
+ // bogus server name, the error should be detected in the name
+ // constructor
+ EXPECT_THROW(rdataFactoryFromFile(RRType::AFSDB(), RRClass::IN(),
+ "rdata_afsdb_fromWire5.wire"),
+ DNSMessageFORMERR);
+}
+
+TEST_F(Rdata_AFSDB_Test, toWireBuffer) {
+ // construct actual data
+ rdata_afsdb.toWire(obuffer);
+
+ // construct expected data
+ UnitTestUtil::readWireData("rdata_afsdb_toWire1.wire", expected_wire);
+
+ // then compare them
+ EXPECT_PRED_FORMAT4(UnitTestUtil::matchWireData,
+ obuffer.getData(), obuffer.getLength(),
+ &expected_wire[0], expected_wire.size());
+
+ // clear buffer for the next test
+ obuffer.clear();
+
+ // construct actual data
+ Name("example.com.").toWire(obuffer);
+ rdata_afsdb2.toWire(obuffer);
+
+ // construct expected data
+ UnitTestUtil::readWireData("rdata_afsdb_toWire2.wire", expected_wire);
+
+ // then compare them
+ EXPECT_PRED_FORMAT4(UnitTestUtil::matchWireData,
+ obuffer.getData(), obuffer.getLength(),
+ &expected_wire[0], expected_wire.size());
+}
+
+TEST_F(Rdata_AFSDB_Test, toWireRenderer) {
+ // similar to toWireBuffer, but names in RDATA could be compressed due to
+ // preceding names. Actually they must not be compressed according to
+ // RFC3597, and this test checks that.
+
+ // construct actual data
+ rdata_afsdb.toWire(renderer);
+
+ // construct expected data
+ UnitTestUtil::readWireData("rdata_afsdb_toWire1.wire", expected_wire);
+
+ // then compare them
+ EXPECT_PRED_FORMAT4(UnitTestUtil::matchWireData,
+ renderer.getData(), renderer.getLength(),
+ &expected_wire[0], expected_wire.size());
+
+ // clear renderer for the next test
+ renderer.clear();
+
+ // construct actual data
+ Name("example.com.").toWire(obuffer);
+ rdata_afsdb2.toWire(renderer);
+
+ // construct expected data
+ UnitTestUtil::readWireData("rdata_afsdb_toWire2.wire", expected_wire);
+
+ // then compare them
+ EXPECT_PRED_FORMAT4(UnitTestUtil::matchWireData,
+ renderer.getData(), renderer.getLength(),
+ &expected_wire[0], expected_wire.size());
+}
+
+TEST_F(Rdata_AFSDB_Test, toText) {
+ EXPECT_EQ(afsdb_text, rdata_afsdb.toText());
+ EXPECT_EQ(afsdb_text2, rdata_afsdb2.toText());
+}
+
+TEST_F(Rdata_AFSDB_Test, compare) {
+ // check reflexivity
+ EXPECT_EQ(0, rdata_afsdb.compare(rdata_afsdb));
+
+ // name must be compared in case-insensitive manner
+ EXPECT_EQ(0, rdata_afsdb.compare(generic::AFSDB("1 "
+ "AFSDB.example.com.")));
+
+ const generic::AFSDB small1("10 afsdb.example.com");
+ const generic::AFSDB large1("65535 afsdb.example.com");
+ const generic::AFSDB large2("256 afsdb.example.com");
+
+ // confirm these are compared as unsigned values
+ EXPECT_GT(0, rdata_afsdb.compare(large1));
+ EXPECT_LT(0, large1.compare(rdata_afsdb));
+
+ // confirm these are compared in network byte order
+ EXPECT_GT(0, small1.compare(large2));
+ EXPECT_LT(0, large2.compare(small1));
+
+ // another AFSDB whose server name is larger than that of rdata_afsdb.
+ const generic::AFSDB large3("256 zzzzz.example.com");
+ EXPECT_GT(0, large2.compare(large3));
+ EXPECT_LT(0, large3.compare(large2));
+
+ // comparison attempt between incompatible RR types should be rejected
+ EXPECT_THROW(rdata_afsdb.compare(*rdata_nomatch), bad_cast);
+}
+}
diff --git a/src/lib/dns/tests/rdata_minfo_unittest.cc b/src/lib/dns/tests/rdata_minfo_unittest.cc
new file mode 100644
index 0000000..30c7c39
--- /dev/null
+++ b/src/lib/dns/tests/rdata_minfo_unittest.cc
@@ -0,0 +1,184 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <util/buffer.h>
+#include <dns/exceptions.h>
+#include <dns/messagerenderer.h>
+#include <dns/rdata.h>
+#include <dns/rdataclass.h>
+#include <dns/rrclass.h>
+#include <dns/rrtype.h>
+
+#include <gtest/gtest.h>
+
+#include <dns/tests/unittest_util.h>
+#include <dns/tests/rdata_unittest.h>
+
+using isc::UnitTestUtil;
+using namespace std;
+using namespace isc::dns;
+using namespace isc::util;
+using namespace isc::dns::rdata;
+
+// minfo text
+const char* const minfo_txt = "rmailbox.example.com. emailbox.example.com.";
+const char* const minfo_txt2 = "root.example.com. emailbox.example.com.";
+const char* const too_long_label = "01234567890123456789012345678901234567"
+ "89012345678901234567890123";
+
+namespace {
+class Rdata_MINFO_Test : public RdataTest {
+public:
+ Rdata_MINFO_Test():
+ rdata_minfo(string(minfo_txt)), rdata_minfo2(string(minfo_txt2)) {}
+
+ const generic::MINFO rdata_minfo;
+ const generic::MINFO rdata_minfo2;
+};
+
+
+TEST_F(Rdata_MINFO_Test, createFromText) {
+ EXPECT_EQ(Name("rmailbox.example.com."), rdata_minfo.getRmailbox());
+ EXPECT_EQ(Name("emailbox.example.com."), rdata_minfo.getEmailbox());
+
+ EXPECT_EQ(Name("root.example.com."), rdata_minfo2.getRmailbox());
+ EXPECT_EQ(Name("emailbox.example.com."), rdata_minfo2.getEmailbox());
+}
+
+TEST_F(Rdata_MINFO_Test, badText) {
+ // incomplete text
+ EXPECT_THROW(generic::MINFO("root.example.com."),
+ InvalidRdataText);
+ // number of fields (must be 2) is incorrect
+ EXPECT_THROW(generic::MINFO("root.example.com emailbox.example.com. "
+ "example.com."),
+ InvalidRdataText);
+ // bad rmailbox name
+ EXPECT_THROW(generic::MINFO("root.example.com. emailbox.example.com." +
+ string(too_long_label)),
+ TooLongLabel);
+ // bad emailbox name
+ EXPECT_THROW(generic::MINFO("root.example.com." +
+ string(too_long_label) + " emailbox.example.com."),
+ TooLongLabel);
+}
+
+TEST_F(Rdata_MINFO_Test, createFromWire) {
+ // uncompressed names
+ EXPECT_EQ(0, rdata_minfo.compare(
+ *rdataFactoryFromFile(RRType::MINFO(), RRClass::IN(),
+ "rdata_minfo_fromWire1.wire")));
+ // compressed names
+ EXPECT_EQ(0, rdata_minfo.compare(
+ *rdataFactoryFromFile(RRType::MINFO(), RRClass::IN(),
+ "rdata_minfo_fromWire2.wire", 15)));
+ // RDLENGTH is too short
+ EXPECT_THROW(rdataFactoryFromFile(RRType::MINFO(), RRClass::IN(),
+ "rdata_minfo_fromWire3.wire"),
+ InvalidRdataLength);
+ // RDLENGTH is too long
+ EXPECT_THROW(rdataFactoryFromFile(RRType::MINFO(), RRClass::IN(),
+ "rdata_minfo_fromWire4.wire"),
+ InvalidRdataLength);
+ // bogus rmailbox name, the error should be detected in the name
+ // constructor
+ EXPECT_THROW(rdataFactoryFromFile(RRType::MINFO(), RRClass::IN(),
+ "rdata_minfo_fromWire5.wire"),
+ DNSMessageFORMERR);
+ // bogus emailbox name, the error should be detected in the name
+ // constructor
+ EXPECT_THROW(rdataFactoryFromFile(RRType::MINFO(), RRClass::IN(),
+ "rdata_minfo_fromWire6.wire"),
+ DNSMessageFORMERR);
+}
+
+TEST_F(Rdata_MINFO_Test, assignment) {
+ generic::MINFO copy((string(minfo_txt2)));
+ copy = rdata_minfo;
+ EXPECT_EQ(0, copy.compare(rdata_minfo));
+
+ // Check if the copied data is valid even after the original is deleted
+ generic::MINFO* copy2 = new generic::MINFO(rdata_minfo);
+ generic::MINFO copy3((string(minfo_txt2)));
+ copy3 = *copy2;
+ delete copy2;
+ EXPECT_EQ(0, copy3.compare(rdata_minfo));
+
+ // Self assignment
+ copy = copy;
+ EXPECT_EQ(0, copy.compare(rdata_minfo));
+}
+
+TEST_F(Rdata_MINFO_Test, toWireBuffer) {
+ rdata_minfo.toWire(obuffer);
+ vector<unsigned char> data;
+ UnitTestUtil::readWireData("rdata_minfo_toWireUncompressed1.wire", data);
+ EXPECT_PRED_FORMAT4(UnitTestUtil::matchWireData,
+ static_cast<const uint8_t *>(obuffer.getData()),
+ obuffer.getLength(), &data[0], data.size());
+
+ obuffer.clear();
+ rdata_minfo2.toWire(obuffer);
+ vector<unsigned char> data2;
+ UnitTestUtil::readWireData("rdata_minfo_toWireUncompressed2.wire", data2);
+ EXPECT_PRED_FORMAT4(UnitTestUtil::matchWireData,
+ static_cast<const uint8_t *>(obuffer.getData()),
+ obuffer.getLength(), &data2[0], data2.size());
+}
+
+TEST_F(Rdata_MINFO_Test, toWireRenderer) {
+ rdata_minfo.toWire(renderer);
+ vector<unsigned char> data;
+ UnitTestUtil::readWireData("rdata_minfo_toWire1.wire", data);
+ EXPECT_PRED_FORMAT4(UnitTestUtil::matchWireData,
+ static_cast<const uint8_t *>(renderer.getData()),
+ renderer.getLength(), &data[0], data.size());
+ renderer.clear();
+ rdata_minfo2.toWire(renderer);
+ vector<unsigned char> data2;
+ UnitTestUtil::readWireData("rdata_minfo_toWire2.wire", data2);
+ EXPECT_PRED_FORMAT4(UnitTestUtil::matchWireData,
+ static_cast<const uint8_t *>(renderer.getData()),
+ renderer.getLength(), &data2[0], data2.size());
+}
+
+TEST_F(Rdata_MINFO_Test, toText) {
+ EXPECT_EQ(minfo_txt, rdata_minfo.toText());
+ EXPECT_EQ(minfo_txt2, rdata_minfo2.toText());
+}
+
+TEST_F(Rdata_MINFO_Test, compare) {
+ // check reflexivity
+ EXPECT_EQ(0, rdata_minfo.compare(rdata_minfo));
+
+ // names must be compared in case-insensitive manner
+ EXPECT_EQ(0, rdata_minfo.compare(generic::MINFO("RMAILBOX.example.com. "
+ "emailbox.EXAMPLE.com.")));
+
+ // another MINFO whose rmailbox name is larger than that of rdata_minfo.
+ const generic::MINFO large1_minfo("zzzzzzzz.example.com. "
+ "emailbox.example.com.");
+ EXPECT_GT(0, rdata_minfo.compare(large1_minfo));
+ EXPECT_LT(0, large1_minfo.compare(rdata_minfo));
+
+ // another MINFO whose emailbox name is larger than that of rdata_minfo.
+ const generic::MINFO large2_minfo("rmailbox.example.com. "
+ "zzzzzzzzzzz.example.com.");
+ EXPECT_GT(0, rdata_minfo.compare(large2_minfo));
+ EXPECT_LT(0, large2_minfo.compare(rdata_minfo));
+
+ // comparison attempt between incompatible RR types should be rejected
+ EXPECT_THROW(rdata_minfo.compare(*RdataTest::rdata_nomatch), bad_cast);
+}
+}
diff --git a/src/lib/dns/tests/rdata_naptr_unittest.cc b/src/lib/dns/tests/rdata_naptr_unittest.cc
new file mode 100644
index 0000000..f905943
--- /dev/null
+++ b/src/lib/dns/tests/rdata_naptr_unittest.cc
@@ -0,0 +1,178 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <util/buffer.h>
+#include <dns/exceptions.h>
+#include <dns/messagerenderer.h>
+#include <dns/rdata.h>
+#include <dns/rdataclass.h>
+#include <dns/rrclass.h>
+#include <dns/rrtype.h>
+
+#include <gtest/gtest.h>
+
+#include <dns/tests/unittest_util.h>
+#include <dns/tests/rdata_unittest.h>
+
+using isc::UnitTestUtil;
+using namespace std;
+using namespace isc::dns;
+using namespace isc::util;
+using namespace isc::dns::rdata;
+using namespace isc::dns::rdata::generic;
+
+namespace {
+class Rdata_NAPTR_Test : public RdataTest {
+};
+
+// 10 100 "S" "SIP+D2U" "" _sip._udp.example.com.
+static uint8_t naptr_rdata[] = {0x00,0x0a,0x00,0x64,0x01,0x53,0x07,0x53,0x49,
+ 0x50,0x2b,0x44,0x32,0x55,0x00,0x04,0x5f,0x73,0x69,0x70,0x04,0x5f,0x75,0x64,
+ 0x70,0x07,0x65,0x78,0x61,0x6d,0x70,0x6c,0x65,0x03,0x63,0x6f,0x6d,0x00};
+
+static const char *naptr_str =
+ "10 100 \"S\" \"SIP+D2U\" \"\" _sip._udp.example.com.";
+static const char *naptr_str2 =
+ "10 100 S SIP+D2U \"\" _sip._udp.example.com.";
+
+static const char *naptr_str_small1 =
+ "9 100 \"S\" \"SIP+D2U\" \"\" _sip._udp.example.com.";
+static const char *naptr_str_small2 =
+ "10 90 \"S\" \"SIP+D2U\" \"\" _sip._udp.example.com.";
+static const char *naptr_str_small3 =
+ "10 100 \"R\" \"SIP+D2U\" \"\" _sip._udp.example.com.";
+static const char *naptr_str_small4 =
+ "10 100 \"S\" \"SIP+C2U\" \"\" _sip._udp.example.com.";
+static const char *naptr_str_small5 =
+ "10 100 \"S\" \"SIP+D2U\" \"\" _rip._udp.example.com.";
+
+static const char *naptr_str_large1 =
+ "11 100 \"S\" \"SIP+D2U\" \"\" _sip._udp.example.com.";
+static const char *naptr_str_large2 =
+ "10 110 \"S\" \"SIP+D2U\" \"\" _sip._udp.example.com.";
+static const char *naptr_str_large3 =
+ "10 100 \"T\" \"SIP+D2U\" \"\" _sip._udp.example.com.";
+static const char *naptr_str_large4 =
+ "10 100 \"S\" \"SIP+E2U\" \"\" _sip._udp.example.com.";
+static const char *naptr_str_large5 =
+ "10 100 \"S\" \"SIP+D2U\" \"\" _tip._udp.example.com.";
+
+TEST_F(Rdata_NAPTR_Test, createFromText) {
+ NAPTR naptr(naptr_str);
+ EXPECT_EQ(10, naptr.getOrder());
+ EXPECT_EQ(100, naptr.getPreference());
+ EXPECT_EQ(string("S"), naptr.getFlags());
+ EXPECT_EQ(string("SIP+D2U"), naptr.getServices());
+ EXPECT_EQ(string(""), naptr.getRegexp());
+ EXPECT_EQ(Name("_sip._udp.example.com."), naptr.getReplacement());
+
+ // Test <character-string> fields that are separated by spaces instead of being quoted
+ NAPTR naptr2(naptr_str2);
+ EXPECT_EQ(string("S"), naptr2.getFlags());
+ EXPECT_EQ(string("SIP+D2U"), naptr2.getServices());
+}
+
+TEST_F(Rdata_NAPTR_Test, badText) {
+ // Order number cannot exceed 65535
+ EXPECT_THROW(const NAPTR naptr("65536 10 S SIP \"\" _sip._udp.example.com."),
+ InvalidRdataText);
+ // Preference number cannot exceed 65535
+ EXPECT_THROW(const NAPTR naptr("100 65536 S SIP \"\" _sip._udp.example.com."),
+ InvalidRdataText);
+ // No regexp given
+ EXPECT_THROW(const NAPTR naptr("100 10 S SIP _sip._udp.example.com."),
+ InvalidRdataText);
+ // The double-quote delimiters must match
+ EXPECT_THROW(const NAPTR naptr("100 10 \"S SIP \"\" _sip._udp.example.com."),
+ InvalidRdataText);
+ // Order or preference cannot be missing
+ EXPECT_THROW(const NAPTR naptr("10 \"S\" SIP \"\" _sip._udp.example.com."),
+ InvalidRdataText);
+ // Fields must be separated by spaces
+ EXPECT_THROW(const NAPTR naptr("100 10S SIP \"\" _sip._udp.example.com."),
+ InvalidRdataText);
+ EXPECT_THROW(const NAPTR naptr("100 10 \"S\"\"SIP\" \"\" _sip._udp.example.com."),
+ InvalidRdataText);
+ // Field cannot be missing
+ EXPECT_THROW(const NAPTR naptr("100 10 \"S\""), InvalidRdataText);
+
+ // The <character-string> cannot exceed 255 characters
+ string long_naptr_str;
+ long_naptr_str += "100 10 ";
+ for (int i = 0; i < 257; ++i) {
+ long_naptr_str += 'A';
+ }
+ long_naptr_str += " SIP \"\" _sip._udp.example.com.";
+ EXPECT_THROW(const NAPTR naptr(long_naptr_str), CharStringTooLong);
+}
+
+TEST_F(Rdata_NAPTR_Test, createFromWire) {
+ InputBuffer input_buffer(naptr_rdata, sizeof(naptr_rdata));
+ NAPTR naptr(input_buffer, sizeof(naptr_rdata));
+ EXPECT_EQ(10, naptr.getOrder());
+ EXPECT_EQ(100, naptr.getPreference());
+ EXPECT_EQ(string("S"), naptr.getFlags());
+ EXPECT_EQ(string("SIP+D2U"), naptr.getServices());
+ EXPECT_EQ(string(""), naptr.getRegexp());
+ EXPECT_EQ(Name("_sip._udp.example.com."), naptr.getReplacement());
+}
+
+TEST_F(Rdata_NAPTR_Test, toWire) {
+ NAPTR naptr(naptr_str);
+ naptr.toWire(obuffer);
+
+ EXPECT_PRED_FORMAT4(UnitTestUtil::matchWireData, obuffer.getData(),
+ obuffer.getLength(), naptr_rdata, sizeof(naptr_rdata));
+}
+
+TEST_F(Rdata_NAPTR_Test, toWireRenderer) {
+ NAPTR naptr(naptr_str);
+
+ naptr.toWire(renderer);
+ EXPECT_PRED_FORMAT4(UnitTestUtil::matchWireData, obuffer.getData(),
+ obuffer.getLength(), naptr_rdata, sizeof(naptr_rdata));
+}
+
+TEST_F(Rdata_NAPTR_Test, toText) {
+ NAPTR naptr(naptr_str);
+ EXPECT_EQ(naptr_str, naptr.toText());
+}
+
+TEST_F(Rdata_NAPTR_Test, compare) {
+ NAPTR naptr(naptr_str);
+ NAPTR naptr_small1(naptr_str_small1);
+ NAPTR naptr_small2(naptr_str_small2);
+ NAPTR naptr_small3(naptr_str_small3);
+ NAPTR naptr_small4(naptr_str_small4);
+ NAPTR naptr_small5(naptr_str_small5);
+ NAPTR naptr_large1(naptr_str_large1);
+ NAPTR naptr_large2(naptr_str_large2);
+ NAPTR naptr_large3(naptr_str_large3);
+ NAPTR naptr_large4(naptr_str_large4);
+ NAPTR naptr_large5(naptr_str_large5);
+
+ EXPECT_EQ(0, naptr.compare(NAPTR(naptr_str)));
+ EXPECT_EQ(1, naptr.compare(NAPTR(naptr_str_small1)));
+ EXPECT_EQ(1, naptr.compare(NAPTR(naptr_str_small2)));
+ EXPECT_EQ(1, naptr.compare(NAPTR(naptr_str_small3)));
+ EXPECT_EQ(1, naptr.compare(NAPTR(naptr_str_small4)));
+ EXPECT_EQ(1, naptr.compare(NAPTR(naptr_str_small5)));
+ EXPECT_EQ(-1, naptr.compare(NAPTR(naptr_str_large1)));
+ EXPECT_EQ(-1, naptr.compare(NAPTR(naptr_str_large2)));
+ EXPECT_EQ(-1, naptr.compare(NAPTR(naptr_str_large3)));
+ EXPECT_EQ(-1, naptr.compare(NAPTR(naptr_str_large4)));
+ EXPECT_EQ(-1, naptr.compare(NAPTR(naptr_str_large5)));
+}
+
+}
diff --git a/src/lib/dns/tests/rdata_rrsig_unittest.cc b/src/lib/dns/tests/rdata_rrsig_unittest.cc
index 903021f..3324b99 100644
--- a/src/lib/dns/tests/rdata_rrsig_unittest.cc
+++ b/src/lib/dns/tests/rdata_rrsig_unittest.cc
@@ -47,7 +47,7 @@ TEST_F(Rdata_RRSIG_Test, fromText) {
"f49t+sXKPzbipN9g+s1ZPiIyofc=");
generic::RRSIG rdata_rrsig(rrsig_txt);
EXPECT_EQ(rrsig_txt, rdata_rrsig.toText());
-
+ EXPECT_EQ(isc::dns::RRType::A(), rdata_rrsig.typeCovered());
}
TEST_F(Rdata_RRSIG_Test, badText) {
diff --git a/src/lib/dns/tests/testdata/Makefile.am b/src/lib/dns/tests/testdata/Makefile.am
index 743b5d2..3aa4937 100644
--- a/src/lib/dns/tests/testdata/Makefile.am
+++ b/src/lib/dns/tests/testdata/Makefile.am
@@ -26,10 +26,20 @@ BUILT_SOURCES += rdata_nsec3_fromWire10.wire rdata_nsec3_fromWire11.wire
BUILT_SOURCES += rdata_nsec3_fromWire12.wire rdata_nsec3_fromWire13.wire
BUILT_SOURCES += rdata_nsec3_fromWire14.wire rdata_nsec3_fromWire15.wire
BUILT_SOURCES += rdata_rrsig_fromWire2.wire
+BUILT_SOURCES += rdata_minfo_fromWire1.wire rdata_minfo_fromWire2.wire
+BUILT_SOURCES += rdata_minfo_fromWire3.wire rdata_minfo_fromWire4.wire
+BUILT_SOURCES += rdata_minfo_fromWire5.wire rdata_minfo_fromWire6.wire
+BUILT_SOURCES += rdata_minfo_toWire1.wire rdata_minfo_toWire2.wire
+BUILT_SOURCES += rdata_minfo_toWireUncompressed1.wire
+BUILT_SOURCES += rdata_minfo_toWireUncompressed2.wire
BUILT_SOURCES += rdata_rp_fromWire1.wire rdata_rp_fromWire2.wire
BUILT_SOURCES += rdata_rp_fromWire3.wire rdata_rp_fromWire4.wire
BUILT_SOURCES += rdata_rp_fromWire5.wire rdata_rp_fromWire6.wire
BUILT_SOURCES += rdata_rp_toWire1.wire rdata_rp_toWire2.wire
+BUILT_SOURCES += rdata_afsdb_fromWire1.wire rdata_afsdb_fromWire2.wire
+BUILT_SOURCES += rdata_afsdb_fromWire3.wire rdata_afsdb_fromWire4.wire
+BUILT_SOURCES += rdata_afsdb_fromWire5.wire
+BUILT_SOURCES += rdata_afsdb_toWire1.wire rdata_afsdb_toWire2.wire
BUILT_SOURCES += rdata_soa_toWireUncompressed.wire
BUILT_SOURCES += rdata_txt_fromWire2.wire rdata_txt_fromWire3.wire
BUILT_SOURCES += rdata_txt_fromWire4.wire rdata_txt_fromWire5.wire
@@ -99,8 +109,18 @@ EXTRA_DIST += rdata_rp_fromWire1.spec rdata_rp_fromWire2.spec
EXTRA_DIST += rdata_rp_fromWire3.spec rdata_rp_fromWire4.spec
EXTRA_DIST += rdata_rp_fromWire5.spec rdata_rp_fromWire6.spec
EXTRA_DIST += rdata_rp_toWire1.spec rdata_rp_toWire2.spec
+EXTRA_DIST += rdata_afsdb_fromWire1.spec rdata_afsdb_fromWire2.spec
+EXTRA_DIST += rdata_afsdb_fromWire3.spec rdata_afsdb_fromWire4.spec
+EXTRA_DIST += rdata_afsdb_fromWire5.spec
+EXTRA_DIST += rdata_afsdb_toWire1.spec rdata_afsdb_toWire2.spec
EXTRA_DIST += rdata_soa_fromWire rdata_soa_toWireUncompressed.spec
EXTRA_DIST += rdata_srv_fromWire
+EXTRA_DIST += rdata_minfo_fromWire1.spec rdata_minfo_fromWire2.spec
+EXTRA_DIST += rdata_minfo_fromWire3.spec rdata_minfo_fromWire4.spec
+EXTRA_DIST += rdata_minfo_fromWire5.spec rdata_minfo_fromWire6.spec
+EXTRA_DIST += rdata_minfo_toWire1.spec rdata_minfo_toWire2.spec
+EXTRA_DIST += rdata_minfo_toWireUncompressed1.spec
+EXTRA_DIST += rdata_minfo_toWireUncompressed2.spec
EXTRA_DIST += rdata_txt_fromWire1 rdata_txt_fromWire2.spec
EXTRA_DIST += rdata_txt_fromWire3.spec rdata_txt_fromWire4.spec
EXTRA_DIST += rdata_txt_fromWire5.spec rdata_unknown_fromWire
diff --git a/src/lib/dns/tests/testdata/rdata_afsdb_fromWire1.spec b/src/lib/dns/tests/testdata/rdata_afsdb_fromWire1.spec
new file mode 100644
index 0000000..f831313
--- /dev/null
+++ b/src/lib/dns/tests/testdata/rdata_afsdb_fromWire1.spec
@@ -0,0 +1,3 @@
+[custom]
+sections: afsdb
+[afsdb]
diff --git a/src/lib/dns/tests/testdata/rdata_afsdb_fromWire2.spec b/src/lib/dns/tests/testdata/rdata_afsdb_fromWire2.spec
new file mode 100644
index 0000000..f33e768
--- /dev/null
+++ b/src/lib/dns/tests/testdata/rdata_afsdb_fromWire2.spec
@@ -0,0 +1,6 @@
+[custom]
+sections: name:afsdb
+[name]
+name: example.com
+[afsdb]
+server: afsdb.ptr=0
diff --git a/src/lib/dns/tests/testdata/rdata_afsdb_fromWire3.spec b/src/lib/dns/tests/testdata/rdata_afsdb_fromWire3.spec
new file mode 100644
index 0000000..993032f
--- /dev/null
+++ b/src/lib/dns/tests/testdata/rdata_afsdb_fromWire3.spec
@@ -0,0 +1,4 @@
+[custom]
+sections: afsdb
+[afsdb]
+rdlen: 3
diff --git a/src/lib/dns/tests/testdata/rdata_afsdb_fromWire4.spec b/src/lib/dns/tests/testdata/rdata_afsdb_fromWire4.spec
new file mode 100644
index 0000000..37abf13
--- /dev/null
+++ b/src/lib/dns/tests/testdata/rdata_afsdb_fromWire4.spec
@@ -0,0 +1,4 @@
+[custom]
+sections: afsdb
+[afsdb]
+rdlen: 80
diff --git a/src/lib/dns/tests/testdata/rdata_afsdb_fromWire5.spec b/src/lib/dns/tests/testdata/rdata_afsdb_fromWire5.spec
new file mode 100644
index 0000000..0ea79dd
--- /dev/null
+++ b/src/lib/dns/tests/testdata/rdata_afsdb_fromWire5.spec
@@ -0,0 +1,4 @@
+[custom]
+sections: afsdb
+[afsdb]
+server: "01234567890123456789012345678901234567890123456789012345678901234"
diff --git a/src/lib/dns/tests/testdata/rdata_afsdb_toWire1.spec b/src/lib/dns/tests/testdata/rdata_afsdb_toWire1.spec
new file mode 100644
index 0000000..1946458
--- /dev/null
+++ b/src/lib/dns/tests/testdata/rdata_afsdb_toWire1.spec
@@ -0,0 +1,4 @@
+[custom]
+sections: afsdb
+[afsdb]
+rdlen: -1
diff --git a/src/lib/dns/tests/testdata/rdata_afsdb_toWire2.spec b/src/lib/dns/tests/testdata/rdata_afsdb_toWire2.spec
new file mode 100644
index 0000000..c80011a
--- /dev/null
+++ b/src/lib/dns/tests/testdata/rdata_afsdb_toWire2.spec
@@ -0,0 +1,8 @@
+[custom]
+sections: name:afsdb
+[name]
+name: example.com.
+[afsdb]
+subtype: 0
+server: root.example.com
+rdlen: -1
diff --git a/src/lib/dns/tests/testdata/rdata_minfo_fromWire1.spec b/src/lib/dns/tests/testdata/rdata_minfo_fromWire1.spec
new file mode 100644
index 0000000..2c43db0
--- /dev/null
+++ b/src/lib/dns/tests/testdata/rdata_minfo_fromWire1.spec
@@ -0,0 +1,3 @@
+[custom]
+sections: minfo
+[minfo]
diff --git a/src/lib/dns/tests/testdata/rdata_minfo_fromWire2.spec b/src/lib/dns/tests/testdata/rdata_minfo_fromWire2.spec
new file mode 100644
index 0000000..d781cac
--- /dev/null
+++ b/src/lib/dns/tests/testdata/rdata_minfo_fromWire2.spec
@@ -0,0 +1,7 @@
+[custom]
+sections: name:minfo
+[name]
+name: a.example.com.
+[minfo]
+rmailbox: rmailbox.ptr=02
+emailbox: emailbox.ptr=02
diff --git a/src/lib/dns/tests/testdata/rdata_minfo_fromWire3.spec b/src/lib/dns/tests/testdata/rdata_minfo_fromWire3.spec
new file mode 100644
index 0000000..a1d4b76
--- /dev/null
+++ b/src/lib/dns/tests/testdata/rdata_minfo_fromWire3.spec
@@ -0,0 +1,6 @@
+[custom]
+sections: minfo
+# rdlength too short
+[minfo]
+emailbox: emailbox.ptr=11
+rdlen: 3
diff --git a/src/lib/dns/tests/testdata/rdata_minfo_fromWire4.spec b/src/lib/dns/tests/testdata/rdata_minfo_fromWire4.spec
new file mode 100644
index 0000000..269a6ce
--- /dev/null
+++ b/src/lib/dns/tests/testdata/rdata_minfo_fromWire4.spec
@@ -0,0 +1,6 @@
+[custom]
+sections: minfo
+# rdlength too long
+[minfo]
+emailbox: emailbox.ptr=11
+rdlen: 80
diff --git a/src/lib/dns/tests/testdata/rdata_minfo_fromWire5.spec b/src/lib/dns/tests/testdata/rdata_minfo_fromWire5.spec
new file mode 100644
index 0000000..3a888e3
--- /dev/null
+++ b/src/lib/dns/tests/testdata/rdata_minfo_fromWire5.spec
@@ -0,0 +1,5 @@
+[custom]
+sections: minfo
+# bogus rmailbox name
+[minfo]
+rmailbox: "01234567890123456789012345678901234567890123456789012345678901234"
diff --git a/src/lib/dns/tests/testdata/rdata_minfo_fromWire6.spec b/src/lib/dns/tests/testdata/rdata_minfo_fromWire6.spec
new file mode 100644
index 0000000..c75ed8e
--- /dev/null
+++ b/src/lib/dns/tests/testdata/rdata_minfo_fromWire6.spec
@@ -0,0 +1,5 @@
+[custom]
+sections: minfo
+# bogus emailbox name
+[minfo]
+emailbox: "01234567890123456789012345678901234567890123456789012345678901234"
diff --git a/src/lib/dns/tests/testdata/rdata_minfo_toWire1.spec b/src/lib/dns/tests/testdata/rdata_minfo_toWire1.spec
new file mode 100644
index 0000000..7b340a3
--- /dev/null
+++ b/src/lib/dns/tests/testdata/rdata_minfo_toWire1.spec
@@ -0,0 +1,5 @@
+[custom]
+sections: minfo
+[minfo]
+emailbox: emailbox.ptr=09
+rdlen: -1
diff --git a/src/lib/dns/tests/testdata/rdata_minfo_toWire2.spec b/src/lib/dns/tests/testdata/rdata_minfo_toWire2.spec
new file mode 100644
index 0000000..132f118
--- /dev/null
+++ b/src/lib/dns/tests/testdata/rdata_minfo_toWire2.spec
@@ -0,0 +1,6 @@
+[custom]
+sections: minfo
+[minfo]
+rmailbox: root.example.com.
+emailbox: emailbox.ptr=05
+rdlen: -1
diff --git a/src/lib/dns/tests/testdata/rdata_minfo_toWireUncompressed1.spec b/src/lib/dns/tests/testdata/rdata_minfo_toWireUncompressed1.spec
new file mode 100644
index 0000000..d99a381
--- /dev/null
+++ b/src/lib/dns/tests/testdata/rdata_minfo_toWireUncompressed1.spec
@@ -0,0 +1,7 @@
+#
+# The simplest form of MINFO: all default parameters
+#
+[custom]
+sections: minfo
+[minfo]
+rdlen: -1
diff --git a/src/lib/dns/tests/testdata/rdata_minfo_toWireUncompressed2.spec b/src/lib/dns/tests/testdata/rdata_minfo_toWireUncompressed2.spec
new file mode 100644
index 0000000..0f78fcc
--- /dev/null
+++ b/src/lib/dns/tests/testdata/rdata_minfo_toWireUncompressed2.spec
@@ -0,0 +1,8 @@
+#
+# A simple form of MINFO: custom rmailbox and default emailbox
+#
+[custom]
+sections: minfo
+[minfo]
+rmailbox: root.example.com.
+rdlen: -1
diff --git a/src/lib/exceptions/exceptions.h b/src/lib/exceptions/exceptions.h
index d0f1d74..433bb7d 100644
--- a/src/lib/exceptions/exceptions.h
+++ b/src/lib/exceptions/exceptions.h
@@ -137,6 +137,18 @@ public:
};
///
+/// \brief A generic exception that is thrown when a function is
+/// not implemented.
+///
+/// This may be because the implementation is unfinished, or because the
+/// function is not planned to be provided for that situation at all.
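+///
+/// As an illustration (the method name is hypothetical), such a function
+/// would typically report itself as:
+/// \code
+///     void SomeClass::futureFeature() {
+///         isc_throw(NotImplemented, "futureFeature() is not implemented");
+///     }
+/// \endcode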
+class NotImplemented : public Exception {
+public:
+ NotImplemented(const char* file, size_t line, const char* what) :
+ isc::Exception(file, line, what) {}
+};
+
+///
/// A shortcut macro to insert known values into exception arguments.
///
/// It allows the \c stream argument to be part of a statement using an
diff --git a/src/lib/python/isc/config/ccsession.py b/src/lib/python/isc/config/ccsession.py
index 4fa9d58..ba7724c 100644
--- a/src/lib/python/isc/config/ccsession.py
+++ b/src/lib/python/isc/config/ccsession.py
@@ -91,6 +91,7 @@ COMMAND_CONFIG_UPDATE = "config_update"
COMMAND_MODULE_SPECIFICATION_UPDATE = "module_specification_update"
COMMAND_GET_COMMANDS_SPEC = "get_commands_spec"
+COMMAND_GET_STATISTICS_SPEC = "get_statistics_spec"
COMMAND_GET_CONFIG = "get_config"
COMMAND_SET_CONFIG = "set_config"
COMMAND_GET_MODULE_SPEC = "get_module_spec"
diff --git a/src/lib/python/isc/config/cfgmgr.py b/src/lib/python/isc/config/cfgmgr.py
index 18e001c..1db9fd3 100644
--- a/src/lib/python/isc/config/cfgmgr.py
+++ b/src/lib/python/isc/config/cfgmgr.py
@@ -267,6 +267,19 @@ class ConfigManager:
commands[module_name] = self.module_specs[module_name].get_commands_spec()
return commands
+ def get_statistics_spec(self, name = None):
+ """Returns a dict containing 'module_name': statistics_spec for
+ all modules. If name is specified, only that module will
+ be included"""
+ statistics = {}
+ if name:
+ if name in self.module_specs:
+ statistics[name] = self.module_specs[name].get_statistics_spec()
+ else:
+ for module_name in self.module_specs.keys():
+ statistics[module_name] = self.module_specs[module_name].get_statistics_spec()
+ return statistics
+
def read_config(self):
"""Read the current configuration from the file specificied at init()"""
try:
@@ -457,6 +470,8 @@ class ConfigManager:
if cmd:
if cmd == ccsession.COMMAND_GET_COMMANDS_SPEC:
answer = ccsession.create_answer(0, self.get_commands_spec())
+ elif cmd == ccsession.COMMAND_GET_STATISTICS_SPEC:
+ answer = ccsession.create_answer(0, self.get_statistics_spec())
elif cmd == ccsession.COMMAND_GET_MODULE_SPEC:
answer = self._handle_get_module_spec(arg)
elif cmd == ccsession.COMMAND_GET_CONFIG:
diff --git a/src/lib/python/isc/config/module_spec.py b/src/lib/python/isc/config/module_spec.py
index 9aa49e0..b79f928 100644
--- a/src/lib/python/isc/config/module_spec.py
+++ b/src/lib/python/isc/config/module_spec.py
@@ -23,6 +23,7 @@
import json
import sys
+import time
import isc.cc.data
@@ -91,7 +92,7 @@ class ModuleSpec:
return _validate_spec_list(data_def, full, data, errors)
else:
# no spec, always bad
- if errors != None:
+ if errors is not None:
errors.append("No config_data specification")
return False
@@ -117,6 +118,26 @@ class ModuleSpec:
return False
+ def validate_statistics(self, full, stat, errors = None):
+ """Check whether the given piece of data conforms to this
+ data definition. If so, it returns True. If not, it will
+ return false. If errors is given, and is an array, a string
+ describing the error will be appended to it. The current
+ version stops as soon as there is one error so this list
+ will not be exhaustive. If 'full' is true, it also errors on
+ non-optional missing values. Set this to False if you want to
+ validate only a part of a statistics tree (like a list of
+ non-default values). Also it checks 'item_format' in case
+ of time"""
+ stat_spec = self.get_statistics_spec()
+ if stat_spec is not None:
+ return _validate_spec_list(stat_spec, full, stat, errors)
+ else:
+ # no spec, always bad
+ if errors is not None:
+ errors.append("No statistics specification")
+ return False
+
def get_module_name(self):
"""Returns a string containing the name of the module as
specified by the specification given at __init__()"""
@@ -152,6 +173,14 @@ class ModuleSpec:
else:
return None
+ def get_statistics_spec(self):
+ """Returns a dict representation of the statistics part of the
+ specification, or None if there is none."""
+ if 'statistics' in self._module_spec:
+ return self._module_spec['statistics']
+ else:
+ return None
+
def __str__(self):
"""Returns a string representation of the full specification"""
return self._module_spec.__str__()
@@ -160,8 +189,9 @@ def _check(module_spec):
"""Checks the full specification. This is a dict that contains the
element "module_spec", which is in itself a dict that
must contain at least a "module_name" (string) and optionally
- a "config_data" and a "commands" element, both of which are lists
- of dicts. Raises a ModuleSpecError if there is a problem."""
+ a "config_data", a "commands" and a "statistics" element, all
+ of which are lists of dicts. Raises a ModuleSpecError if there
+ is a problem."""
if type(module_spec) != dict:
raise ModuleSpecError("data specification not a dict")
if "module_name" not in module_spec:
@@ -173,6 +203,8 @@ def _check(module_spec):
_check_config_spec(module_spec["config_data"])
if "commands" in module_spec:
_check_command_spec(module_spec["commands"])
+ if "statistics" in module_spec:
+ _check_statistics_spec(module_spec["statistics"])
def _check_config_spec(config_data):
# config data is a list of items represented by dicts that contain
@@ -263,34 +295,75 @@ def _check_item_spec(config_item):
if type(map_item) != dict:
raise ModuleSpecError("map_item_spec element is not a dict")
_check_item_spec(map_item)
+ if 'item_format' in config_item and 'item_default' in config_item:
+ item_format = config_item["item_format"]
+ item_default = config_item["item_default"]
+ if not _check_format(item_default, item_format):
+ raise ModuleSpecError(
+ "Wrong format for " + str(item_default) + " in " + str(item_name))
+def _check_statistics_spec(statistics):
+ # statistics is a list of items represented by dicts that contain
+ # things like "item_name", depending on the type they can have
+ # specific subitems
+ """Checks a list that contains the statistics part of the
+ specification. Raises a ModuleSpecError if there is a
+ problem."""
+ if type(statistics) != list:
+ raise ModuleSpecError("statistics is of type " + str(type(statistics))
+ + ", not a list of items")
+ for stat_item in statistics:
+ _check_item_spec(stat_item)
+ # Additionally checks if there are 'item_title' and
+ # 'item_description'
+ for item in [ 'item_title', 'item_description' ]:
+ if item not in stat_item:
+ raise ModuleSpecError("no " + item + " in statistics item")
+
+def _check_format(value, format_name):
+ """Check if specified value and format are correct. Return True if
+ is is correct."""
+ # TODO: other format types should be added if necessary
+ time_formats = { 'date-time' : "%Y-%m-%dT%H:%M:%SZ",
+ 'date' : "%Y-%m-%d",
+ 'time' : "%H:%M:%S" }
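+ # The loop below does a "reverse check": the value is parsed with the
+ # expected format and then formatted back, and only values that
+ # round-trip unchanged are accepted. For example, "2011-08-23T12:08:55Z"
+ # passes for 'date-time', while "2011-8-23" fails for 'date' because it
+ # is re-formatted as "2011-08-23".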
+ for fmt in time_formats:
+ if format_name == fmt:
+ try:
+ # reverse check
+ return value == time.strftime(
+ time_formats[fmt],
+ time.strptime(value, time_formats[fmt]))
+ except (ValueError, TypeError):
+ break
+ return False
def _validate_type(spec, value, errors):
"""Returns true if the value is of the correct type given the
specification"""
data_type = spec['item_type']
if data_type == "integer" and type(value) != int:
- if errors != None:
+ if errors is not None:
errors.append(str(value) + " should be an integer")
return False
elif data_type == "real" and type(value) != float:
- if errors != None:
+ if errors is not None:
errors.append(str(value) + " should be a real")
return False
elif data_type == "boolean" and type(value) != bool:
- if errors != None:
+ if errors is not None:
errors.append(str(value) + " should be a boolean")
return False
elif data_type == "string" and type(value) != str:
- if errors != None:
+ if errors is not None:
errors.append(str(value) + " should be a string")
return False
elif data_type == "list" and type(value) != list:
- if errors != None:
+ if errors is not None:
errors.append(str(value) + " should be a list")
return False
elif data_type == "map" and type(value) != dict:
- if errors != None:
+ if errors is not None:
errors.append(str(value) + " should be a map")
return False
elif data_type == "named_set" and type(value) != dict:
@@ -300,6 +373,18 @@ def _validate_type(spec, value, errors):
else:
return True
+def _validate_format(spec, value, errors):
+ """Returns true if the value is of the correct format given the
+ specification. And also return true if no 'item_format'"""
+ if "item_format" in spec:
+ item_format = spec['item_format']
+ if not _check_format(value, item_format):
+ if errors is not None:
+ errors.append("format type of " + str(value)
+ + " should be " + item_format)
+ return False
+ return True
+
def _validate_item(spec, full, data, errors):
if not _validate_type(spec, data, errors):
return False
@@ -308,6 +393,8 @@ def _validate_item(spec, full, data, errors):
for data_el in data:
if not _validate_type(list_spec, data_el, errors):
return False
+ if not _validate_format(list_spec, data_el, errors):
+ return False
if list_spec['item_type'] == "map":
if not _validate_item(list_spec, full, data_el, errors):
return False
@@ -322,6 +409,8 @@ def _validate_item(spec, full, data, errors):
return False
if not _validate_item(named_set_spec, full, data_el, errors):
return False
+ elif not _validate_format(spec, data, errors):
+ return False
return True
def _validate_spec(spec, full, data, errors):
@@ -333,7 +422,7 @@ def _validate_spec(spec, full, data, errors):
elif item_name in data:
return _validate_item(spec, full, data[item_name], errors)
elif full and not item_optional:
- if errors != None:
+ if errors is not None:
errors.append("non-optional item " + item_name + " missing")
return False
else:
@@ -358,7 +447,7 @@ def _validate_spec_list(module_spec, full, data, errors):
if spec_item["item_name"] == item_name:
found = True
if not found and item_name != "version":
- if errors != None:
+ if errors is not None:
errors.append("unknown item " + item_name)
validated = False
return validated
diff --git a/src/lib/python/isc/config/tests/cfgmgr_test.py b/src/lib/python/isc/config/tests/cfgmgr_test.py
index 0a9e2d3..eacc425 100644
--- a/src/lib/python/isc/config/tests/cfgmgr_test.py
+++ b/src/lib/python/isc/config/tests/cfgmgr_test.py
@@ -219,6 +219,25 @@ class TestConfigManager(unittest.TestCase):
commands_spec = self.cm.get_commands_spec('Spec2')
self.assertEqual(commands_spec['Spec2'], module_spec.get_commands_spec())
+ def test_get_statistics_spec(self):
+ statistics_spec = self.cm.get_statistics_spec()
+ self.assertEqual(statistics_spec, {})
+ module_spec = isc.config.module_spec.module_spec_from_file(self.data_path + os.sep + "spec1.spec")
+ self.assert_(module_spec.get_module_name() not in self.cm.module_specs)
+ self.cm.set_module_spec(module_spec)
+ self.assert_(module_spec.get_module_name() in self.cm.module_specs)
+ statistics_spec = self.cm.get_statistics_spec()
+ self.assertEqual(statistics_spec, { 'Spec1': None })
+ self.cm.remove_module_spec('Spec1')
+ module_spec = isc.config.module_spec.module_spec_from_file(self.data_path + os.sep + "spec2.spec")
+ self.assert_(module_spec.get_module_name() not in self.cm.module_specs)
+ self.cm.set_module_spec(module_spec)
+ self.assert_(module_spec.get_module_name() in self.cm.module_specs)
+ statistics_spec = self.cm.get_statistics_spec()
+ self.assertEqual(statistics_spec['Spec2'], module_spec.get_statistics_spec())
+ statistics_spec = self.cm.get_statistics_spec('Spec2')
+ self.assertEqual(statistics_spec['Spec2'], module_spec.get_statistics_spec())
+
def test_read_config(self):
self.assertEqual(self.cm.config.data, {'version': config_data.BIND10_CONFIG_DATA_VERSION})
self.cm.read_config()
@@ -241,6 +260,7 @@ class TestConfigManager(unittest.TestCase):
self._handle_msg_helper("", { 'result': [ 1, 'Unknown message format: ']})
self._handle_msg_helper({ "command": [ "badcommand" ] }, { 'result': [ 1, "Unknown command: badcommand"]})
self._handle_msg_helper({ "command": [ "get_commands_spec" ] }, { 'result': [ 0, {} ]})
+ self._handle_msg_helper({ "command": [ "get_statistics_spec" ] }, { 'result': [ 0, {} ]})
self._handle_msg_helper({ "command": [ "get_module_spec" ] }, { 'result': [ 0, {} ]})
self._handle_msg_helper({ "command": [ "get_module_spec", { "module_name": "Spec2" } ] }, { 'result': [ 0, {} ]})
#self._handle_msg_helper({ "command": [ "get_module_spec", { "module_name": "nosuchmodule" } ] },
@@ -329,6 +349,7 @@ class TestConfigManager(unittest.TestCase):
{ "module_name" : "Spec2" } ] },
{ 'result': [ 0, self.spec.get_full_spec() ] })
self._handle_msg_helper({ "command": [ "get_commands_spec" ] }, { 'result': [ 0, { self.spec.get_module_name(): self.spec.get_commands_spec() } ]})
+ self._handle_msg_helper({ "command": [ "get_statistics_spec" ] }, { 'result': [ 0, { self.spec.get_module_name(): self.spec.get_statistics_spec() } ]})
# re-add this once we have new way to propagate spec changes (1 instead of the current 2 messages)
#self.assertEqual(len(self.fake_session.message_queue), 2)
# the name here is actually wrong (and hardcoded), but needed in the current version
@@ -450,6 +471,7 @@ class TestConfigManager(unittest.TestCase):
def test_run(self):
self.fake_session.group_sendmsg({ "command": [ "get_commands_spec" ] }, "ConfigManager")
+ self.fake_session.group_sendmsg({ "command": [ "get_statistics_spec" ] }, "ConfigManager")
self.fake_session.group_sendmsg({ "command": [ "shutdown" ] }, "ConfigManager")
self.cm.run()
pass
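
(Illustration only: the command-channel exchange exercised by the tests above,
written out as message literals; the module name is just an example.)

    # Request sent to the ConfigManager group:
    request = {"command": ["get_statistics_spec"]}
    # Reply while no module spec is registered: an empty map.
    reply = {"result": [0, {}]}
    # Once a module (e.g. Spec2) has registered its spec, the reply maps the
    # module name to that module's statistics spec:
    #   {"result": [0, {"Spec2": <Spec2's statistics spec>}]}
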
diff --git a/src/lib/python/isc/config/tests/module_spec_test.py b/src/lib/python/isc/config/tests/module_spec_test.py
index be862c5..fc53d23 100644
--- a/src/lib/python/isc/config/tests/module_spec_test.py
+++ b/src/lib/python/isc/config/tests/module_spec_test.py
@@ -81,6 +81,11 @@ class TestModuleSpec(unittest.TestCase):
self.assertRaises(ModuleSpecError, self.read_spec_file, "spec20.spec")
self.assertRaises(ModuleSpecError, self.read_spec_file, "spec21.spec")
self.assertRaises(ModuleSpecError, self.read_spec_file, "spec26.spec")
+ self.assertRaises(ModuleSpecError, self.read_spec_file, "spec34.spec")
+ self.assertRaises(ModuleSpecError, self.read_spec_file, "spec35.spec")
+ self.assertRaises(ModuleSpecError, self.read_spec_file, "spec36.spec")
+ self.assertRaises(ModuleSpecError, self.read_spec_file, "spec37.spec")
+ self.assertRaises(ModuleSpecError, self.read_spec_file, "spec38.spec")
def validate_data(self, specfile_name, datafile_name):
dd = self.read_spec_file(specfile_name);
@@ -123,6 +128,17 @@ class TestModuleSpec(unittest.TestCase):
self.assertEqual(False, self.validate_command_params("spec27.spec", "data22_8.data", 'cmd1'))
self.assertEqual(False, self.validate_command_params("spec27.spec", "data22_8.data", 'cmd2'))
+ def test_statistics_validation(self):
+ def _validate_stat(specfile_name, datafile_name):
+ dd = self.read_spec_file(specfile_name)
+ data_file = open(self.spec_file(datafile_name))
+ data_str = data_file.read()
+ data = isc.cc.data.parse_value_str(data_str)
+ return dd.validate_statistics(True, data, [])
+ self.assertFalse(self.read_spec_file("spec1.spec").validate_statistics(True, None, None))
+ self.assertTrue(_validate_stat("spec33.spec", "data33_1.data"))
+ self.assertFalse(_validate_stat("spec33.spec", "data33_2.data"))
+
def test_init(self):
self.assertRaises(ModuleSpecError, ModuleSpec, 1)
module_spec = isc.config.module_spec_from_file(self.spec_file("spec1.spec"), False)
@@ -269,6 +285,80 @@ class TestModuleSpec(unittest.TestCase):
}
)
+ self.assertRaises(ModuleSpecError, isc.config.module_spec._check_item_spec,
+ { 'item_name': "a_datetime",
+ 'item_type': "string",
+ 'item_optional': False,
+ 'item_default': 1,
+ 'item_format': "date-time"
+ }
+ )
+
+ self.assertRaises(ModuleSpecError, isc.config.module_spec._check_item_spec,
+ { 'item_name': "a_date",
+ 'item_type': "string",
+ 'item_optional': False,
+ 'item_default': 1,
+ 'item_format': "date"
+ }
+ )
+
+ self.assertRaises(ModuleSpecError, isc.config.module_spec._check_item_spec,
+ { 'item_name': "a_time",
+ 'item_type': "string",
+ 'item_optional': False,
+ 'item_default': 1,
+ 'item_format': "time"
+ }
+ )
+
+ self.assertRaises(ModuleSpecError, isc.config.module_spec._check_item_spec,
+ { 'item_name': "a_datetime",
+ 'item_type': "string",
+ 'item_optional': False,
+ 'item_default': "2011-05-27T19:42:57Z",
+ 'item_format': "dummy-format"
+ }
+ )
+
+ self.assertRaises(ModuleSpecError, isc.config.module_spec._check_item_spec,
+ { 'item_name': "a_date",
+ 'item_type': "string",
+ 'item_optional': False,
+ 'item_default': "2011-05-27",
+ 'item_format': "dummy-format"
+ }
+ )
+
+ self.assertRaises(ModuleSpecError, isc.config.module_spec._check_item_spec,
+ { 'item_name': "a_time",
+ 'item_type': "string",
+ 'item_optional': False,
+ 'item_default': "19:42:57Z",
+ 'item_format': "dummy-format"
+ }
+ )
+
+ def test_check_format(self):
+ self.assertTrue(isc.config.module_spec._check_format('2011-05-27T19:42:57Z', 'date-time'))
+ self.assertTrue(isc.config.module_spec._check_format('2011-05-27', 'date'))
+ self.assertTrue(isc.config.module_spec._check_format('19:42:57', 'time'))
+ self.assertFalse(isc.config.module_spec._check_format('2011-05-27T19:42:57Z', 'dummy'))
+ self.assertFalse(isc.config.module_spec._check_format('2011-05-27', 'dummy'))
+ self.assertFalse(isc.config.module_spec._check_format('19:42:57', 'dummy'))
+ self.assertFalse(isc.config.module_spec._check_format('2011-13-99T99:99:99Z', 'date-time'))
+ self.assertFalse(isc.config.module_spec._check_format('2011-13-99', 'date'))
+ self.assertFalse(isc.config.module_spec._check_format('99:99:99', 'time'))
+ self.assertFalse(isc.config.module_spec._check_format('', 'date-time'))
+ self.assertFalse(isc.config.module_spec._check_format(None, 'date-time'))
+ self.assertFalse(isc.config.module_spec._check_format(None, None))
+ # wrong date-time-type format not ending with "Z"
+ self.assertFalse(isc.config.module_spec._check_format('2011-05-27T19:42:57', 'date-time'))
+ # wrong date-type format ending with "T"
+ self.assertFalse(isc.config.module_spec._check_format('2011-05-27T', 'date'))
+ # wrong time-type format ending with "Z"
+ self.assertFalse(isc.config.module_spec._check_format('19:42:57Z', 'time'))
+
def test_validate_type(self):
errors = []
self.assertEqual(True, isc.config.module_spec._validate_type({ 'item_type': 'integer' }, 1, errors))
@@ -306,6 +396,25 @@ class TestModuleSpec(unittest.TestCase):
self.assertEqual(False, isc.config.module_spec._validate_type({ 'item_type': 'map' }, 1, errors))
self.assertEqual(['1 should be a map'], errors)
+ def test_validate_format(self):
+ errors = []
+ self.assertEqual(True, isc.config.module_spec._validate_format({ 'item_format': 'date-time' }, "2011-05-27T19:42:57Z", errors))
+ self.assertEqual(False, isc.config.module_spec._validate_format({ 'item_format': 'date-time' }, "a", None))
+ self.assertEqual(False, isc.config.module_spec._validate_format({ 'item_format': 'date-time' }, "a", errors))
+ self.assertEqual(['format type of a should be date-time'], errors)
+
+ errors = []
+ self.assertEqual(True, isc.config.module_spec._validate_format({ 'item_format': 'date' }, "2011-05-27", errors))
+ self.assertEqual(False, isc.config.module_spec._validate_format({ 'item_format': 'date' }, "a", None))
+ self.assertEqual(False, isc.config.module_spec._validate_format({ 'item_format': 'date' }, "a", errors))
+ self.assertEqual(['format type of a should be date'], errors)
+
+ errors = []
+ self.assertEqual(True, isc.config.module_spec._validate_format({ 'item_format': 'time' }, "19:42:57", errors))
+ self.assertEqual(False, isc.config.module_spec._validate_format({ 'item_format': 'time' }, "a", None))
+ self.assertEqual(False, isc.config.module_spec._validate_format({ 'item_format': 'time' }, "a", errors))
+ self.assertEqual(['format type of a should be time'], errors)
+
def test_validate_spec(self):
spec = { 'item_name': "an_item",
'item_type': "string",
diff --git a/src/lib/util/filename.h b/src/lib/util/filename.h
index c9874ce..f625938 100644
--- a/src/lib/util/filename.h
+++ b/src/lib/util/filename.h
@@ -103,6 +103,11 @@ public:
return (extension_);
}
+ /// \return Name and extension of the given file name
+ std::string nameAndExtension() const {
+ return (name_ + extension_);
+ }
+
/// \brief Expand Name with Default
///
/// A default file specified is supplied and used to fill in any missing
diff --git a/src/lib/util/python/gen_wiredata.py.in b/src/lib/util/python/gen_wiredata.py.in
index 8e1f079..8bd2b3c 100755
--- a/src/lib/util/python/gen_wiredata.py.in
+++ b/src/lib/util/python/gen_wiredata.py.in
@@ -822,6 +822,49 @@ class RP(RR):
f.write('# MAILBOX=%s TEXT=%s\n' % (self.mailbox, self.text))
f.write('%s %s\n' % (mailbox_wire, text_wire))
+class MINFO(RR):
+ '''Implements rendering MINFO RDATA in the test data format.
+
+ Configurable parameters are as follows (see the description of the
+ attribute of the same name for the default value):
+ - rmailbox (string): The rmailbox field.
+ - emailbox (string): The emailbox field.
+ These strings must each be interpreted as a valid domain name.
+ '''
+ rmailbox = 'rmailbox.example.com'
+ emailbox = 'emailbox.example.com'
+ def dump(self, f):
+ rmailbox_wire = encode_name(self.rmailbox)
+ emailbox_wire = encode_name(self.emailbox)
+ if self.rdlen is None:
+ self.rdlen = (len(rmailbox_wire) + len(emailbox_wire)) // 2
+ else:
+ self.rdlen = int(self.rdlen)
+ self.dump_header(f, self.rdlen)
+ f.write('# RMAILBOX=%s EMAILBOX=%s\n' % (self.rmailbox, self.emailbox))
+ f.write('%s %s\n' % (rmailbox_wire, emailbox_wire))
+
+class AFSDB(RR):
+ '''Implements rendering AFSDB RDATA in the test data format.
+
+ Configurable parameters are as follows (see the description of the
+ attribute of the same name for the default value):
+ - subtype (16 bit int): The subtype field.
+ - server (string): The server field.
+ The string must be interpreted as a valid domain name.
+ '''
+ subtype = 1
+ server = 'afsdb.example.com'
+ def dump(self, f):
+ server_wire = encode_name(self.server)
+ if self.rdlen is None:
+ self.rdlen = 2 + len(server_wire) // 2
+ else:
+ self.rdlen = int(self.rdlen)
+ self.dump_header(f, self.rdlen)
+ f.write('# SUBTYPE=%d SERVER=%s\n' % (self.subtype, self.server))
+ f.write('%04x %s\n' % (self.subtype, server_wire))
+
class NSECBASE(RR):
'''Implements rendering NSEC/NSEC3 type bitmaps commonly used for
these RRs. The NSEC and NSEC3 classes will be inherited from this
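
(Aside on the rdlen arithmetic in MINFO/AFSDB above: the encoded names are hex
strings with two hex digits per octet, so the length in characters divided by
two gives the RDATA length in octets. A rough, self-contained sketch follows;
encode_name_sketch() is a local stand-in, not the helper from this script.)

    def encode_name_sketch(name):
        # Wire-format a domain name as a hex string: one length octet per
        # label, the label bytes, and a terminating root label (00).
        out = ''
        for label in name.rstrip('.').split('.'):
            out += '%02x' % len(label) + label.encode().hex()
        return out + '00'

    rmailbox_wire = encode_name_sketch('rmailbox.example.com')
    emailbox_wire = encode_name_sketch('emailbox.example.com')
    rdlen = (len(rmailbox_wire) + len(emailbox_wire)) // 2
    print(rdlen)   # 44 octets of RDATA for the two default MINFO names
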
diff --git a/src/lib/util/tests/filename_unittest.cc b/src/lib/util/tests/filename_unittest.cc
index be29ff1..07f3525 100644
--- a/src/lib/util/tests/filename_unittest.cc
+++ b/src/lib/util/tests/filename_unittest.cc
@@ -51,42 +51,49 @@ TEST_F(FilenameTest, Components) {
EXPECT_EQ("/alpha/beta/", fname.directory());
EXPECT_EQ("gamma", fname.name());
EXPECT_EQ(".delta", fname.extension());
+ EXPECT_EQ("gamma.delta", fname.nameAndExtension());
// Directory only
fname.setName("/gamma/delta/");
EXPECT_EQ("/gamma/delta/", fname.directory());
EXPECT_EQ("", fname.name());
EXPECT_EQ("", fname.extension());
+ EXPECT_EQ("", fname.nameAndExtension());
// Filename only
fname.setName("epsilon");
EXPECT_EQ("", fname.directory());
EXPECT_EQ("epsilon", fname.name());
EXPECT_EQ("", fname.extension());
+ EXPECT_EQ("epsilon", fname.nameAndExtension());
// Extension only
fname.setName(".zeta");
EXPECT_EQ("", fname.directory());
EXPECT_EQ("", fname.name());
EXPECT_EQ(".zeta", fname.extension());
+ EXPECT_EQ(".zeta", fname.nameAndExtension());
// Missing directory
fname.setName("eta.theta");
EXPECT_EQ("", fname.directory());
EXPECT_EQ("eta", fname.name());
EXPECT_EQ(".theta", fname.extension());
+ EXPECT_EQ("eta.theta", fname.nameAndExtension());
// Missing filename
fname.setName("/iota/.kappa");
EXPECT_EQ("/iota/", fname.directory());
EXPECT_EQ("", fname.name());
EXPECT_EQ(".kappa", fname.extension());
+ EXPECT_EQ(".kappa", fname.nameAndExtension());
// Missing extension
fname.setName("lambda/mu/nu");
EXPECT_EQ("lambda/mu/", fname.directory());
EXPECT_EQ("nu", fname.name());
EXPECT_EQ("", fname.extension());
+ EXPECT_EQ("nu", fname.nameAndExtension());
// Check that the decomposition can occur in the presence of leading and
// trailing spaces
@@ -94,18 +101,21 @@ TEST_F(FilenameTest, Components) {
EXPECT_EQ("lambda/mu/", fname.directory());
EXPECT_EQ("nu", fname.name());
EXPECT_EQ("", fname.extension());
+ EXPECT_EQ("nu", fname.nameAndExtension());
// Empty string
fname.setName("");
EXPECT_EQ("", fname.directory());
EXPECT_EQ("", fname.name());
EXPECT_EQ("", fname.extension());
+ EXPECT_EQ("", fname.nameAndExtension());
// ... and just spaces
fname.setName(" ");
EXPECT_EQ("", fname.directory());
EXPECT_EQ("", fname.name());
EXPECT_EQ("", fname.extension());
+ EXPECT_EQ("", fname.nameAndExtension());
// Check corner cases - where separators are present, but strings are
// absent.
@@ -113,16 +123,19 @@ TEST_F(FilenameTest, Components) {
EXPECT_EQ("/", fname.directory());
EXPECT_EQ("", fname.name());
EXPECT_EQ("", fname.extension());
+ EXPECT_EQ("", fname.nameAndExtension());
fname.setName(".");
EXPECT_EQ("", fname.directory());
EXPECT_EQ("", fname.name());
EXPECT_EQ(".", fname.extension());
+ EXPECT_EQ(".", fname.nameAndExtension());
fname.setName("/.");
EXPECT_EQ("/", fname.directory());
EXPECT_EQ("", fname.name());
EXPECT_EQ(".", fname.extension());
+ EXPECT_EQ(".", fname.nameAndExtension());
// Note that the space is a valid filename here; only leading and trailing
// spaces should be trimmed.
@@ -130,11 +143,13 @@ TEST_F(FilenameTest, Components) {
EXPECT_EQ("/", fname.directory());
EXPECT_EQ(" ", fname.name());
EXPECT_EQ(".", fname.extension());
+ EXPECT_EQ(" .", fname.nameAndExtension());
fname.setName(" / . ");
EXPECT_EQ("/", fname.directory());
EXPECT_EQ(" ", fname.name());
EXPECT_EQ(".", fname.extension());
+ EXPECT_EQ(" .", fname.nameAndExtension());
}
// Check that the expansion with a default works.