BIND 10 trac1140, updated. 5220fc55e92504899d102b5704382382a4e450c1 Merge branch 'trac1140' of ssh://git.bind10.isc.org/var/bind10/git/bind10 into trac1140
BIND 10 source code commits
bind10-changes at lists.isc.org
Fri Oct 7 23:25:45 UTC 2011
The branch, trac1140, has been updated
via 5220fc55e92504899d102b5704382382a4e450c1 (commit)
via d76b95edce86919636ee0e458f0b9def08a9d2ea (commit)
via d4405856fd2e088fbc7cd4caa9b2e9a6c66e8e83 (commit)
via 99fbf7cc5eacc8c0ec65a19a1eb83b4e0a329cd1 (commit)
via ff4a86af49e629a83e40f35c78c7616a208659c4 (commit)
via 47ea557d9d8a9782e4d576c45c545913bbaac4ea (commit)
via 006133b49eb5d44eeacb1d79593b97ae2212bbca (commit)
via 261656de6d4fbe2f6979162c978023f8859d2289 (commit)
via 419768f653e098ab566c9f94771552e2bfe3cc99 (commit)
via affa93f31bbc9719ac4e2ccc0e44d9a09c2b6a3b (commit)
via 929daeade2b98ad56b156eadd22308bd6d7f265a (commit)
via 1f77f60db465b53342d88b4280b1889f1888b411 (commit)
via 7ae9a73236a4bb7eed9f02b30563b7f4e040863f (commit)
via b8d12c83236964f6bbb5cd3910b0960abd0117c1 (commit)
via a3c0c4cffe519c5d70185ec989fe707102a71836 (commit)
via d119c3155e617e120c69abebcf8d22e500dcffca (commit)
via c80a7b8a53dc04e66b55431e2d4c637618293dae (commit)
via 31830397285a50d1636622b58d04fffc7ca883ae (commit)
via c96f735cd5bbbd8be3c32e7d40f264ebfa889be5 (commit)
via 973fc74ac4c030a350f436e96d37a582565e41dc (commit)
via 95cbc4efbaab12b66852ede318cb9af0d3f8780b (commit)
via 90ab0a155bc5e42ef2ad35569968dd3db9c501bb (commit)
via 137d1b29b6063f4d1983bde07f6ec5404f67dcee (commit)
via 2342cdf1ff5563c6afa1901104fe4cda849ad345 (commit)
via 5b302edc6302331a2c39ae1ac5a18759e47340c0 (commit)
via 85071d50cf5e1a569b447ba00e118db04293475a (commit)
via 3898b36a132fe44e51cc99674104d9e1f0d35d36 (commit)
via ed7eecab42af0064d261d9c9dafd701250bbc1d3 (commit)
via d6616e7ef66b3904e2d585e7b4946900f67d3b70 (commit)
via c4344fadc93b62af473a8e05fc3a453256e4ce13 (commit)
via a9b140ed88b9a25f47e5649b635c8a19e81bfdee (commit)
via f5d7359a945241edf986b7c91c0ad6c7bcf113e3 (commit)
via cf136247fad510f55ba230f746558274fada1de6 (commit)
via 5f5b2d7a98eff5dc74f74b7018f50e286ae82c2d (commit)
via 7209be736accd15885ad7eafc23b36eec18c2213 (commit)
via 703cd3ed5855e673443e898d427fdc7768c5bceb (commit)
via 1a035954731fce34faf09705bc61b7eb0ad34ac6 (commit)
via ae43bdedcfaabacbc8e4455313e6a5b4d98a68cd (commit)
via 017b4e1bcc7fe62f11650444518f422934c925ff (commit)
via e9e29a281b0b8b9d91fe9097e51c7e5df6d3ff78 (commit)
via fcd39b6e84665a033d7ee4c06bd904e2b416c53a (commit)
via ce00497088209db82fbbabb80381acf92039763c (commit)
via 0fbdaf01b0fc3d7031b51d542b91f6f758f033fa (commit)
via b3a1ea108d3df58dcd2d247fdc87b3d1fbd953cf (commit)
via 2de8b71f8c0e7d02e25aa7ec6fa13f9933c8b534 (commit)
via 4edd9c38112db5161f46533ffb3886c85880ee03 (commit)
via bff7aa9429b7e0a9f26f69dd24c8aa7efc64ffc6 (commit)
via 1e0d70a994d9cf9cabe10d1205c40b74af2a2bc4 (commit)
via 738afedababcfc874fe107d9bc408d69d213813e (commit)
via 8ed59723a5ae90dedcbf741254b65f88a4c98ca1 (commit)
via 3f2d29d0dc92606fac3ba306c34a32a0bec8159e (commit)
via 3bfaa404624697f5e2f08076c78f94a8438e851c (commit)
via f85f868171956abcc1996235a26a276da2ca6209 (commit)
via 1982c235382043d87737ec24779d10da216101a6 (commit)
via f6c675c19790d3715445a7877cc8d1d193f17071 (commit)
via 7225bbf8e6e3c892159124e7795f7396b5764bb8 (commit)
via 2056251f56e4c5e3ff785b924061fecfe1ac21e4 (commit)
via a5eeb73116cbc74f6bb3fb4a06b99396a8ceebcb (commit)
via 743dad9408b0a86052156e6a3d4fec1001600017 (commit)
via af927e2c390b49012b276c11991a3f7ef3a592a9 (commit)
via d267c0511a07c41cd92e3b0b9ee9bf693743a7cf (commit)
via 42968abbd4edf489d4d667089033d11e4045f463 (commit)
via 33c0d21361655c08b274c75736b7bcbe99dd3d2d (commit)
via 956e210239d46bebe4574c5ca38b3b51b1bb7c65 (commit)
via fe76209cd8ad96144f0e2fc9522f5fda1d52d9c3 (commit)
via eb4917aea94d78ea64fa90f0c70501bbb6d48b37 (commit)
via a01b47d66272166135c20bf15a958bed023ff009 (commit)
via 461acc1e4b464611411ae77b7a72d65c744a740e (commit)
via 9163b3833877225c8b9bd8e59eb7159ea65d3867 (commit)
via e451eade196bc7cc43102412a73faa397253c841 (commit)
via 5a2d0b61afe86668613cbb83a75708b760aae76f (commit)
via 43bbfab2cc57a08da6d2d6ffe8da92efcae9c2ec (commit)
via 38e530b762f7d05caaf06aec41c6df432f0800cf (commit)
via e8e1bd309d449b23dae2b472b650a130300aa760 (commit)
via a5adb8e45ee8c66a19c46bd1bf5f752630619be8 (commit)
via 85ac49c5282c231c71b8d2046889d22b0061db08 (commit)
via ebeb5ead60c5c0d7b16478498b78a8f1ef3b71c3 (commit)
via e38010819247006d20532d24de8dd6c37e0ca664 (commit)
via 00f4c38428153bb5ad99ba1cc40e9a204266dace (commit)
via f7bb760f4d8290d52959ea83b090d1877e4ac9ee (commit)
via b29c5e5221b8e6a9ff65a0c39f14c04afaed5c44 (commit)
via 9e17bd49b426ffba00312cf90ec80d178a20b964 (commit)
via 519720d9c6eb354a2e31089f1c7b8fd0760053f9 (commit)
via c6babcd3e44bc42fdb090d3a4837848d8c7c149c (commit)
via bb444bae93e8e87d1e62214b1819fb73fd7634e4 (commit)
via 8fe024cd171ecf1610419abb70e5d613b94ba5a0 (commit)
via d36eda71276b43e4281ae53fd558155725f4d4eb (commit)
via 32f075fa288dc5ea049cbf72657386889144bd12 (commit)
via 471edfd7a86d91f04536bc7c7fb42ad7239e1731 (commit)
via feeddd7e5b966c9445fc4ac97a6526fa792413cd (commit)
via d801b1e2ebb6c9cf35e3475040b013784f3e6e41 (commit)
via e4c78a2739dddade3aaaa12528afff944458f777 (commit)
via 48bec4c73b92679e91f0cc72fc63bdba9c593e87 (commit)
via dc5aa6284fe6b6f51d85270969f0befd8db1f838 (commit)
via 15e60f1f54722c32c9977f00e49c211f047ee08f (commit)
via 85e4dfa61bf440c132f4ce6bc73130bc6e91719c (commit)
via 7ab4a102ee610f36b4362897431e4fbbeac735c5 (commit)
via 054699635affd9c9ecbe7a108d880829f3ba229e (commit)
via d04acfb82c3425a638f09d2f49208ef86bc7a6b3 (commit)
via 434f4fd17dd3dee1d17e7b2e008f1ab1416d5799 (commit)
via ce8b5fe9567f06f7acba34b9e9b35ad471e2ab67 (commit)
via 34ead9dfeff5f64af36a209cae28075fcbbb3330 (commit)
via fcfe5af9c22c5b666e5ecf646bbe0d9da7b655e9 (commit)
via 1f967a8ffe37f6732dd628d28a13abc442541c38 (commit)
via 3efca5f9b7b7bfeac53044fdd44e5add61397157 (commit)
via a35b62699480e149f22f4e039935bfcf41f97ac2 (commit)
via 9dedc72e89b9ca8ba2c5f3bc562ad9ccd1aa05b0 (commit)
via 7808524aa9bbb424327ac67d7408647cb18840f5 (commit)
via 5b866ef26bd5ae980bb86c494a592ef232552b68 (commit)
via a5387c15e93c6d1925bf4ad0eacdcfd63790c32a (commit)
via d56c782197242e32ccdd23c9e3652ff520f3d58f (commit)
via bd8cb42b61666342ee8bc6c33aed2a168301ff67 (commit)
via 9accf90bb081b057023479f0a86e54017b02cdd3 (commit)
via 9eafb04ee8dbd47022dd9a5e5c1310f88f398d2c (commit)
via 7af1aeddc36a1ac1343f1af12aa29164f1028f03 (commit)
via 15f5d7895a2744376062229cf19593016a773cde (commit)
via ddec42c7a23cca11903ece8f7ab614dcc7e5edd3 (commit)
via d8cac904c7aea4a652a47afb35aceb6ca4808ce8 (commit)
via 433381e5ca62418fc90377d16f1805260b27b619 (commit)
via c8bbdd1d74ac313d8b57d8debe4f7b75490e5df2 (commit)
via e57c5196d3e8dd56b0190799c98b56a5be55333a (commit)
via 06f7bc4b3b69e8fda96f6e626a7dac5b1fbbb233 (commit)
via 0aa4c14ebd1eb0a68c2bcf5c617325596657ea71 (commit)
via 9daa2f686b3bdb03b13e9becf45a722344888cf3 (commit)
via f159ac66aa577889514dc170c87a92c49be5a6cc (commit)
via d6b86a88c7a486f2e5b742fc60d374e48382320e (commit)
via 5ddc441f77a34158039f0328c3ab7c2106b7b3b8 (commit)
via 290e89c515e051dad269f1acbce0b52a541d9c8c (commit)
via 9b8925a4d0ecbd8a09d307dfd56fa15fb8eedcc6 (commit)
via 53314ecb63f3f0f85629b66a228207658d8fd73f (commit)
via 863509e2dc3bf96fd38476d787abb62e0da46624 (commit)
via fe1d6665faf06b3fcc0aaf8ec72905aa4b7ce1f7 (commit)
via 7581a21a7dce1dc6b92ad24293b4269a3531e6d4 (commit)
via 1fd37ae8a4bb25a6e85ffb2158b2ae95fe8cbd04 (commit)
via 8ed3b760c179df435882f2ad96b6dcfad5b6e9fa (commit)
via 3516ab551851273faeeb0b8696695e5f3ffc88f9 (commit)
via 9f8ddd6ee1b73c9403f85b6ef5c85605ca393aa7 (commit)
via 898485cd30084d478e8be688151cd11fb4d492a7 (commit)
via 30f4856101bf23ce155ef0f2ebd1ca6f034d2420 (commit)
via eb4be17ddf3b26c379e3f100cf8e8b0fd4329537 (commit)
via ac06a06d1df9a1cc905b224b79921b0d0ade4c05 (commit)
via 611d0300fb8bb2e87d787023cb5c6030ee07d8d2 (commit)
via fdf02d580f2bb1fbc6fa85ee0edd81a07404d1de (commit)
via a0bb482b46bd05f8c8774bacdd26dc891cb3bef7 (commit)
via cebd7e3562312ade50d972af49239cee7f10d057 (commit)
via 8750dc3ab772e29d7374d779cefb3c8b8c61d2d1 (commit)
via b743e6ba98c8cbb53c45e1c0f59e5a78ba62f5d4 (commit)
via 6556a2ffdd7bdb5370c2f1b3d8c9e8799ef82140 (commit)
via 3e9189a483c0f53eba4f05092c90f7955123f52c (commit)
via 7f5702a379516cee041129c03dd37d67f26d49c1 (commit)
via e60ecc91ad65087c3cff3af479cc455abccbe020 (commit)
via 62bd7736311e166aea3604b8e486b58c1315f82f (commit)
via 9687077033661cf07b6ea2e966299e837a501612 (commit)
via 703d5f36d0102993f311d21e662a28492d8cf7b4 (commit)
via 84d9095c66c765cf78814323597b2e3bbef293d5 (commit)
via e54bc83c4e8a66fd9ab1ae9f27899d70ef82a066 (commit)
via 1a8c86ea2503bffe6dc1f2300dfc2b4efba108cc (commit)
via ed5311a26b7b1368f28191c405ec13da907213ae (commit)
via 493a6449b37b34ac5fe36257b266c229e34d105c (commit)
via 6f6a4cf9d98f2b4550e0949da1e20a7f38440610 (commit)
via 36a53f41a7da580926111dca65652d6389fcd909 (commit)
via 61681dac2023240a4a029072add3a39809ccb7f0 (commit)
via 96dd4d2daf1fb91672a798fa478da0ec8a7ac737 (commit)
via 9354737244e0bb7c22ec684ed652c89991eca913 (commit)
via 9bbc77b6b8381c9a6d831e490a7715ba84b9356f (commit)
via 8023760a5fc6f346cf82340aa50df755b0d0d00a (commit)
via cc0d6e4674fd2e6ebe3775a28ec87fc5c869f924 (commit)
via f9cb0d187f02078b27a0119ce42c83f62461a507 (commit)
via 4fda2b6eefe81f1c197d32a0c8eb14ca1a7d9108 (commit)
via 106e9a793499c81698cf5a938d48933f5e909af4 (commit)
via 26691e282b76d74959e63524b280e77b09ac89df (commit)
via 4cde36d2b97a24f03c192a61248545d0180fb856 (commit)
via c874cb056e2a5e656165f3c160e1b34ccfe8b302 (commit)
via 12fd115d2e1ea8b55f43313ac665c32e07f9498e (commit)
via 84ada921a2fe98489b578b6d780c1ad2e6c31482 (commit)
via 763a994cb14bb11ba823831f54d64071319bfac0 (commit)
via b86d51b24e7d1bb4980426c9a74962628c096ba7 (commit)
via 48d5ac59277e2e8b43f697a0d1d4b0991a40caa0 (commit)
via c191f23dfc2b0179ec0a010a1ff00fa3ae1d9398 (commit)
via 8d2c46f19c1b4f435d7b9180ff6c2e8daf78ab2b (commit)
via 80319933903fbdb359ef9472573bfaceda7c8cd5 (commit)
via 8c838cf57adef3c004b910b086513d9620147692 (commit)
via 1378551aa74712c929a79964ae18d9962ce73787 (commit)
via bb7833f2054edca11a32d24d17486f153db00ec1 (commit)
via c430e464860b4460a0ab32454e53918a1cc7444b (commit)
via 39e529c506a4350cd676bf5ddff6d61686e8814f (commit)
via aba10a01b765b472d57112fd4e09a6fb47b49fa7 (commit)
via 9688dee697e9ad279c6542bf164b820e907e526f (commit)
via c1a72c46b572eee2d94ab53a5589c724fcb1fcf1 (commit)
via 9016513b4d19d2781d0b6f2575b490431e04ec79 (commit)
via 13e8bc43e4888fe9e6df7e536ea0b439c6351199 (commit)
via e89895b7e5f3b7074271c89de281e426c53be347 (commit)
via 938f4e9ba14954551fbc390abb7d1e06d38189c2 (commit)
via b0b0da67c915f3c02020397b8dcf6a078a9b3a90 (commit)
via 1ee8ad4a2b092a6edc35c111c5a3b5b761da0dae (commit)
via c943619d223be1158ae8db5223f655343d06785f (commit)
via 0d874a95d3c782b9c663c64be619f449956df457 (commit)
via 2d325650009f46a1f16ef2e7c1f4ed0827db236f (commit)
via abe73e885b980aace1fd0df492fa321bdd35f01f (commit)
via 53d45f54e33d23a5b4df42dc977a3a6ab597f5c5 (commit)
via 338b54ef4631f0d35601f174eabfa10f1541f46d (commit)
via 698176eccd5d55759fe9448b2c249717c932ac31 (commit)
via 41cbf5a91bdfa0b311aade6b05d2f51f59cce978 (commit)
via d845ae918fe8dce6806c3f927a7c101fc0e2173d (commit)
via 7bc93774a449b3f19748a37186db3efcd3d6c537 (commit)
via d5a58bbe641d32257035a6087f18655e7b66d8fd (commit)
via c64c4730852f74fff8ea75730e0b40cd3b23a85e (commit)
via fdf1c88a53f5970aa4e6d55da42303ce7d4730f7 (commit)
via 33ee923f7139cbda7a616a83d572a4358f456e16 (commit)
via c69a1675dd0434db0b99682d14fa7905fcd3af8f (commit)
via 9b23d60d6f58b18da3995dc3e090d7fd63233bcc (commit)
via 4bb4081381b39c563707c03818a0f9d16ef7846f (commit)
via eef5b0eb5defdd22ef5e351213ab66531f788c5d (commit)
via e7f1ead205f2dc13d6fd6e2a28b121794ca281be (commit)
via 638674c480d47cf957a8b4f7d61dda3320c881ff (commit)
via 0a22b98c05bf5032c190fbfdf9fefceac3597411 (commit)
via f59415a8b5ee951dd298eaf8eecaa21e8955851c (commit)
via 4e458fc15b5c236e1cc44565f6af313753e87a26 (commit)
via e2eca96f1876a72fc8c121c9204d49cb7e9eaeb7 (commit)
via 4a605525cda67bea8c43ca8b3eae6e6749797450 (commit)
via 85455b6e2f7063b10bae9938de1b70f5d319911e (commit)
via 66e1420d30f8e71e867a3b5b0a73ead1156d5660 (commit)
via 16cc75f764b6ea509f386c261b472e282cd606ed (commit)
via b2d2acebebc66495b98eef634ce633eb70cc2411 (commit)
via b1f197c6102ae31ded2e4b61103308dcdfa72a89 (commit)
via acb299784ddbf280aac6ee5a78977c9acbf1fd32 (commit)
via 2418922a1389bbf265b02328f7c4f594257c4026 (commit)
via 44a44c0b568dc997e7522292212e0ef02b522f3d (commit)
via 250ce2abb3d6b48fce778b5e0c651d57582aff7c (commit)
via 99be45a44f97942f9327b16aff368f1650994e0e (commit)
via 7592596f7a9f8dce2e5e8d9311cc40c5199c66e3 (commit)
via c24c42a5e29444313efee6528f172ad66452050d (commit)
via 5e14c4caafaa44b92134c5df01b726f435f46845 (commit)
via 05eaa177051b212669c2a7b9e2194c3e9ba47f14 (commit)
via 9797d47ab90761c50020f78d5a55fb2672ffd7c0 (commit)
via 000164d51a974acf3846a6b0a7795f484e915161 (commit)
via 0b46c391a973bb8d3f0a1681eb0a79e8a196f0f0 (commit)
via 5e5743ecb40da81c4e8ad27ac8b158c9a7aaff87 (commit)
via 9c95bf79406ae791e2f8c7263ff4fddb19d0eda4 (commit)
via 7dfa14ccdb6777ccacb99fe0d716b7d63654426f (commit)
via f0ff0a2f69bcfae3e2a30a3bdeae37b475ae9106 (commit)
via 38816f95cc01f1c7aeec1d42bde3febb308dd98f (commit)
via 0f8868d1ed7d479d05e2a70de67897d133d41ef9 (commit)
via bc03b37015ab6ea23cbec70dbd299c74fb001aba (commit)
via e56e0f7d1ad206f1ebc26e285d82a8e7ff6390e1 (commit)
via 7d2b0148161460b928cf39c7c2969d95d2870d9c (commit)
via 58b843554162e6599ba895c8325985f74adef734 (commit)
via 98cb905a5852321204499985efb42c5a76b9da6e (commit)
via f7a92e4b0336f3c64eb429947657952178b7d76f (commit)
via 3ff9c6c215faa2e1419d4cb67906a1f7772b355a (commit)
via 90b3952ff515f8746ffc6b227695836921bc046d (commit)
via 0372723794501908ae94be9330dcd8577d951f68 (commit)
via 6b27a7ba1c0343725e3d2e9ea7d97426a8f73f0d (commit)
via a8b5aabeb7b56702a85344434d7822a034ff140c (commit)
via 87a3c86e7e132a1ee80bf29b418ad4b61cefc7d8 (commit)
via 8b4f53f245ab45bf07be9b1108fca951133b836a (commit)
via 07b6398dbd11037eb553fc6fcf56dc8051e71150 (commit)
via f0ef6c88066961a038ea1b80face4feaa9a2d17d (commit)
via 8f9f4ece764df4607f695f3f7eb4c421e8ac4c9d (commit)
via 7751d0ac43f1b7186a53ba5dd5cf2eeca6f7dc46 (commit)
via 40cd22fc64c7755efe60cd42cb12851cf3de55a4 (commit)
via ed8d686171f140fd12164d2d34f65b4ab3c97645 (commit)
via 1e32824c93dac7e406d1b35449b42700bf854679 (commit)
via c5d5522f83888a8b442aa7ff17738f3f688749fe (commit)
via 688867daa34ade5075443c77535f80e1d2d76743 (commit)
via d36ded7d95a695f0412f6ccdb59bf55fc600e9d3 (commit)
via b8e90124c19177e0b6b33bd624e244860e2424b3 (commit)
via 5cf1b7ab58c42675c1396fbbd5b1aaf037eb8d19 (commit)
via 17d9827aa40e363650d1698fddba9204f27b5171 (commit)
via 27f447c8b054b17d96abfba431568c1ffe017f0a (commit)
via 219818389cc848dc2d67aff732b9790968851b51 (commit)
via e602f86dae29c62619b0ea8bf2ca69e1ce1b8295 (commit)
via 57f7044d690d38cff90487b5883883a674d2589f (commit)
via 383b6b2891226228ddf3cfd4c3dd8b17ea186b8a (commit)
via 8cc8f4c008f640b7f13f8f1160261275ec14475b (commit)
via b6dd72042939ca62d9ceeb80385eedc7c5f0560d (commit)
via 31e010330189f489c624b7cdb812ef3f33f8e280 (commit)
via 70bba1b3f811261fcef30694568245e83cd64bc5 (commit)
via 6c5f8867a45f40411594372bca09c04ddf5c0002 (commit)
via f1fef139dbc592aa4c7071d47e38e14487ab72e7 (commit)
via 2c8b76ed408547789f2e26ad76773e40e316a392 (commit)
via eefa62a767ec09c20d679876842e15e9d3742499 (commit)
via 58845974d57ee0cd0b261b00d1ededccc7bde105 (commit)
via d49e3c5e79e00b59e518c4bc1f71882adf721696 (commit)
via 06a24c688282b61dd2ce5b6c00608bee34ae3563 (commit)
via b902e70583a9dfb1ee410e297e2da4c8b944ba8d (commit)
via 09349cf206ee9e68618713b97e621b7ef2a6c0a9 (commit)
via ff1bd2a00278bc753a7d035fd5020ff936df1882 (commit)
via c89f3a2f43fd7fe70bcb199fad0ccf94364b1ebe (commit)
via 4c86025464db4603ec07490169aaf4b77868057b (commit)
via 842fc917163f0b8cb2a703a4c7fe078d944932e8 (commit)
via 0eb576518f81c3758c7dbaa2522bd8302b1836b3 (commit)
via 68cf1ccf20ecfcc1e06de69fcd50d13cf8b5e1e0 (commit)
via bd0c874dda60a0f5e235b653e1bb63716cb385f8 (commit)
via b6709a7001e4812c4ed774ef0ff3111fb654d199 (commit)
via 9b4326dc093b71bcd77a527111ea6778795bf068 (commit)
via 2c5b2fc19c21dd12747eb960baee65759847a118 (commit)
via 0aa89cf84c78a9ee8b97a51c17b3982324021f81 (commit)
via d9dd4c5a7438c152f6c9ae2bcc4c9f5ee598728b (commit)
via 03da93322b956e003882c09a8d4ea949f790dbc4 (commit)
via bfa93c0ee79935bf37d379065e219ba0afb0c4e3 (commit)
via 7a061c2e82d62e2b275cb5a8d7460dce7d36f050 (commit)
via a6cbb14cc9c986d109983087313225829f1c91fe (commit)
via 7cc32b7915532354ed7e2fd15f7ca5a9b9b64610 (commit)
via dd340b32df88083fdc17f682094b451f7dcdf6d6 (commit)
via 30c277567f64d09c11cadcb173eef066efdaea07 (commit)
via ec2793914d1090db8c8d94a2f9b92ed97b1a6cba (commit)
via a59c7f28a458842b4edce2d6639639b17a85eb9f (commit)
via 766db4a6100e34e6a29aa9c849b60ba80b551389 (commit)
via f7b5370a9bf82b0b480b75275349d8570ee83c4c (commit)
via 12d62d54d33fbb1572a1aa3089b0d547d02924aa (commit)
via c38112d8b59bfb6e73b5fbc637fa9eaaae42c52d (commit)
via ccb4c0aa696918c579a0b80448fc93606152ec93 (commit)
via 0fa8006ade38ac7206ff57934f3bb866be6407a2 (commit)
via b25df34f6a7582baff54dab59c4e033f6db4e42c (commit)
via 715fee7daf2f966261d997e1b39888f14fb28a45 (commit)
via c3424869801ea8811106f8f97928ed5cd71efbff (commit)
via 4e544fba3459913e23f86dc5e628665bd288c483 (commit)
via 259955ba65c102bd36ec818ca4193aab311e983d (commit)
via 1f81b4916fa3bd0cbf4f41cc7ad8f13450aa6481 (commit)
via 6d6353cea42ed088df3c2c90c4c2741a1b8b2871 (commit)
via 7efa61c40b94d3234dd7fc79a0fc7ae0f1b0a105 (commit)
via 5c3a7ca7b3b28a7a163b0af3cbadc3d8fe7a702b (commit)
via 54c6127e005c8e3dd82cd97d49aca23f5a5d8029 (commit)
via b6261f09b53af42a26d88fd50d74ab1e84524cce (commit)
via 8634aa9cab1c2205629540b4d99b88847148bd80 (commit)
via d1a1871cc6c93ababba62f42bcab5205320b8867 (commit)
via 2a5c5383e3df0e625367bf85b740f62bf777b211 (commit)
via af10f1ef696ee94f817bc389e0e8b6cd08234333 (commit)
via f16de89251e4607eb413df666a64022c50478a4c (commit)
via 3eb0dedb8a5d9835b394484c6112a4b2fcbe9d51 (commit)
via 2f8c4b3da6060a9b57e944726dd61cb1b2a19906 (commit)
via 4e93ba217318854742144bf0b8e30f4c3614db92 (commit)
via ee468e8f02f1cd1bcf09da75170ed62dc230b70e (commit)
via 433f29fd44d8dd6c940e49ee2657b769d70781fe (commit)
via f0274b7451761b2dc48c0be148ecd8563c9800da (commit)
via 45ef63790b34ebc2d26081609bb168aefee800dc (commit)
via 38d80ef7186ac2b18ed234a825894f5f78fc90b1 (commit)
via 88bee2515653d3b5481608bc92a1956c7ea7cf48 (commit)
via e9286ce511be095f2b16b1b7bc503b1e4377593d (commit)
via 723a6d1f333f1d513d5e4fe26a8ee7611767c9fc (commit)
via 88fe1bafce118f40d256097c2bfbdf9e53553784 (commit)
via cbf08d56345922d754182b941b84b18bfddabcda (commit)
via 84a95705e1e8219187e75433baec2fd2fc8ba2fe (commit)
via aa5fd84d438cf165c9836fa545d15c33781401af (commit)
via fac67afceead36ba7296e194942811d9ed3b437b (commit)
via 90b740caf4cc5d207dfa2ac98f1c73d9818792e2 (commit)
via 0ea828cb5c74b0f9a254aeab2c7d31ff214371e5 (commit)
via 170a0661dfb17014a62cd2eeaaa99e408bc55a14 (commit)
via b12f4e55007ee2e8130991f322e782bb31a8a289 (commit)
via 18083458382473b414a3fc7f57623d2241f487ef (commit)
via fbe4ee1f76237fdd586638ce1ded4c6e5bd0bf1d (commit)
via 9c53309978b4a4bf684b3abbb853876c5413f875 (commit)
via 8ee5844e8dc3ec7d99a5890bdc85f54afd8886b6 (commit)
via c9ad781ebbaebb2e57956ac9eda542eaa88a743b (commit)
via 9f441d72a245e3ccce2ee014adaa0ad62e7b0d29 (commit)
via 51c4b53945599a72d550d7380c7107e11b467d5c (commit)
via 4d39f72b87677c194d282a9e93de67dc0adfb4f3 (commit)
via ece8bd155e646869b10fd08817ee7cd71c699c61 (commit)
via b59f898456b33294d71a333d3f3b4fe9dc81e3dd (commit)
via 84d7ae48d44e055cb16e3900cf2c4b2262f6a6da (commit)
via f8b10842465d60483e3bc9827e06115ea8081bfc (commit)
via 06341cb6cdbd5ff57c376f7b0b25aba4a35bab86 (commit)
via 54aad8af04350eb3a45a4bd6623681efa2f8d2fb (commit)
via 61aaae27e12db2a00cfde674931e5080e733e6b3 (commit)
via 3089b6fd6eff650dc06c0698b80eae1595986677 (commit)
via 3a9dc4fbd7dab867829ba3299d86c2f5b58d864f (commit)
via 5859f177250685fbd49c9562ffc3e984b9d5ebae (commit)
via 4948e0c8965c3d39b6e1bcb1bdb12b9615260a27 (commit)
via 59e2ceaf7b75c38391c518436a70ac3d41b8c8be (commit)
via 4e3c6c5e5b19be3a0f970a06e3e135d1b2fae668 (commit)
via 03e9f45f8a6584a373f1bd15f01f56d9296c842a (commit)
via cb4d8443645a5c3e973b4e2477198686d8d8c507 (commit)
via f847a5e079ceae0346b84fb320ed06ce9b443a63 (commit)
via 05512e090c6c3cb852cebdb85ae7c12e8001603b (commit)
via c35f6b15bb6b703154e05399266dd2051ef9cfa9 (commit)
via 3f2864bf1271ca525858cf3e1fa641e3496eec59 (commit)
via f8720ba467d8e107c512160a5502caf9be58a425 (commit)
via 38af8a4225e8c82564758e8a5629da438220bc87 (commit)
via c5e0db2b7d8fbdb13548e01310f623f131ea0e9c (commit)
via 26c7bfe851f00422beb442a77d25cc0887557b79 (commit)
via f5239632a06383f2b4f6825cb6a006ceb8bea417 (commit)
via 680f05c35753bf1f70392d25b1e6310cf46476ce (commit)
via b12351c21ee92a13536aa89331cc73bd166dbe5f (commit)
via 2e1dceedf6a4f661a8d7e57757b28f9f6cb1a9b3 (commit)
via df69ad0d0231218610f68ecb2b1953ae7f28fa68 (commit)
via 5b713ea8e5fd35fdb1ab7ff953e010ef9b60f98c (commit)
via 02b2e71bdc1564f4272869bb5676727af809870f (commit)
via 8d1942a3b7516e8161b7f54888da2a4a4d27484e (commit)
via 856ff83ad2b97c136de1103a421547bdcb332e74 (commit)
via 7cc9b08f18967fa1a694f5b7e320aad62d0d3e88 (commit)
via 25e56e5d1bc9197e882e3a42285d0efad21a51f2 (commit)
via 87d2a8766e610a0dece7d86268ac9be4122d6d82 (commit)
via 64ac0166d5ea3b565f500f8a770dfa4d7d9f6a28 (commit)
via c86612cd4120b9ad3d00978c04ea252e7d501e44 (commit)
via c1c2ddf5be4556e6e8cd52a314ddd6d026c7e540 (commit)
via ba50f189eced101999efb96672179aa1024204e9 (commit)
via 6906362bebdbe7e0de66f2c8d10a00bd34911121 (commit)
via 83a58b817e5c0432d543b66208f502b059fdbe13 (commit)
via 40126733cc69634035b0cca3a0c90ee3a606ea3b (commit)
via bcafb8b98d5df77108a83a6bd8b7746f7c2616d7 (commit)
via 4ef59f25a452f934408a9ba837cea9b7fab0be48 (commit)
via 3d069e2745070bc23f14c845cb7d8116d919f0da (commit)
via 230df584722d08705f2cb3b99940b764b1cb7865 (commit)
via fda403b09887a24403c3a90d7ad6c95288f2d641 (commit)
via 88095bed9cbc3e39c61eb0ea7dee1646ff13ac7e (commit)
via b557ab47f3355f5fc7d4f87dfa9e4a15e7e9f3e3 (commit)
via 04b04226b726b6e1fea6bba970556b9ed5cc3446 (commit)
via 3a838eb454ed0de4f073b99e94e02014eca63a56 (commit)
via 748c3e1aeb833012a19b651af7d98757a8ffc50f (commit)
via a0e04c0ad837b4b42caf139573f2a95c86cdac76 (commit)
via 4e12574323ca3db3e985acee0540c603b2b33124 (commit)
via 3fc53ba91b92ad40ebbf46272f57a45e3d2e3a27 (commit)
via fcb2409598d37e2078076cf43794ef6c445ac22f (commit)
via c6d2a365580709981852007cd0a9a3b32afaa5c3 (commit)
via da8bfe82aa18a67b1a99fa459f48cea89ee2a41a (commit)
via 7980a6c8e598d34f5f733f5c6c3ca83c0a0f1187 (commit)
via 9c62a36b0ebf9ff4ef3dad1f4d91195d301348ed (commit)
via 2ec9338d84714ea670ee888f1edf5a4ad220ea9a (commit)
via 1d907966f7f0fe7089efe46d8b808d9115f0d167 (commit)
via 93327a85ea63f7043c49a0af2384a1e274ab1dda (commit)
via 75e756cdf9d5b08e859afac5cef38bd818a90e60 (commit)
via 778bd1be6ced7f4a135e2a6bcc7414c4e4bdc27d (commit)
via 38c8e9a9ccfd7fd57bc5fa5090c86cf7b7920d28 (commit)
via ddf9da5175b1182810838861f1464fb05fe00104 (commit)
via 8fe581570c2ef4f881762f4f22ef4f66c1063491 (commit)
via 2812fa5cb0c2013ef1696888651390aa71a76b4a (commit)
via b131dd71ce147b4efcece9dd8fba16c51fefa492 (commit)
via 84d83c1d8979e2906971af79f2e41083299beb7e (commit)
via 255bf5b18e2b0e28a65062e87dc2d1212376bfc2 (commit)
via e2ada81cd2a090f707147abdb73a90d44db2f2b0 (commit)
via 0953b3f5d7ed1b4a25362f9a2d1a41eeeda8efa6 (commit)
via 8d380bb47dd24c7fd2c4880a4106835d871bf4d5 (commit)
via 77ba8639c274865c762eee688383c321f18ef889 (commit)
via ecf3f4b962026aa9094ee321b03ee32df2fdf1d2 (commit)
via 30df43575158b0cb294ec49a8463fe8b49593e62 (commit)
via 4c0accf0a591b0422c84216150e1b9b4e008609e (commit)
via 1f051716bce3d7aa2545722ba41958df9758cadc (commit)
via 10553ed4ebb5b949ae74d277d398d2e8a3909ea5 (commit)
via d916aef6af6bb8506b1ff4756054a1697410982f (commit)
via 4700bada6282f5ad10b53cd8ca7cc03b8fea791d (commit)
via ef64723fe9638f8d56f58fba44a149ac620eadd9 (commit)
via 5de6f9658f745e05361242042afd518b444d7466 (commit)
via 3f847f9d35bf2bf9ee0d957ea1aa9ffb27a32cdb (commit)
via df047c5ccb5c81f9a3d36f7fc38a19bc7c8f2ac2 (commit)
via a7346d50ae5389ce37e35a7131f0f218663b8c68 (commit)
via ad91831c938430b6d4a8fd7bfae517a0f1e327c1 (commit)
via 43da3c6c1cc7cb5fcb1dbe2f983a53e883408d1b (commit)
via fb3d0e1146e9e5a36a9402690a09e7629408c677 (commit)
via 27b3488b71a5c3b95652eab2720497d6d055346e (commit)
via 087c6def9087019640a437b63c782a5c22de1feb (commit)
via 3b0ccfb2f23961e4cbddb9d0873bab0f4c1d4c3d (commit)
via 0a39659638fc68f60b95b102968d7d0ad75443ea (commit)
via 2684301690d59a41cd20d131491e0714d156fa7c (commit)
via 5baa7aa73ad8d8d5250990a9e330b9b746659452 (commit)
via 1921e1297dfcb878b9417edefe4d87639c827948 (commit)
via 872bd5756ba8b5daeeacedfcd4ec38bc50035ec8 (commit)
via 67d8e93028e014f644868fede3570abb28e5fb43 (commit)
via 4ff5e524a7f79ad7f4513ebed3ca0990392263af (commit)
via 5157876271e945703ee699f07442ee1a72bba362 (commit)
via 73df015104eb5ac8934ff1176c24079e6e9b09c3 (commit)
via 586d49827ebaa2cf2c70dc030c5830afb1fb89f5 (commit)
via 2b755575c9d0277980008df99f92c38dd6b3a420 (commit)
via 38d1a8aa943424e1a0de0503ee8aa961a95d0e14 (commit)
via 4579a2a9f43a38144539447bb5076bfcbaf8b6d8 (commit)
via 58d7fda0fd2efc2d4bccfdcb55ce6ba42af83aa0 (commit)
via 7f08fc3123ef7d26a2e61dd29455c07510404a7e (commit)
via af6328603521584ff62b25a6f86a923bba5a4f5e (commit)
via 9d48d1964569b49be17afc3e20085a23544a32de (commit)
via 28988a78d3b80c7f1080fce696acf176b74a29fe (commit)
via 5c6391cca55baec236b813b4c2e2b7699595559d (commit)
via 08b5add9a6e405342c0c8bc3bdf5d552ed45df0e (commit)
via a176724d99c073f8e547dea2675a5b7d1df70515 (commit)
via a9b769b8bf12e2922e385c62ce337fb723731699 (commit)
via 6318db7dc90cb6656cc2a1f8e875f2258f6a4343 (commit)
via 35a0136d56de7faca280666ba40bb1b87a85fff6 (commit)
via 3a11f2fd5bbe98fc555bfdf1cdf9019f7222e3e9 (commit)
via b97162729a3ad4214e5f6b85452a27904b8f34ca (commit)
via ad36c799ff07d47ebd5c861c63e9feef50408e34 (commit)
via 9d3e78f0d8075ad62391ed005e1e82f79f05e2ca (commit)
via c5e0ebf85ef50e61457f3b99a05109a92b328573 (commit)
via 8216d5dbe1ef23d56ba589fe1de619a601bada4b (commit)
via 1c834de994f51a1fb98add648dad49abfea2c403 (commit)
via 9622aed753d953a763a9c0ac25cd7868d257bad7 (commit)
via 01d8d0f13289ecdf9996d6d5d26ac0d43e30549c (commit)
via 7fe505d131d2a13a6a412789474d92493ade65dd (commit)
via 353c08576b5c7fae46be834cb815df744ec2ba96 (commit)
via dc9318330acbd36e07ad5a4e8a68c9a6e2430543 (commit)
via e4a17a0630a6460090c5cdb562e02ba992a74fa8 (commit)
via 954143a2748110c720d28df49159ed4f0bc1a1a2 (commit)
via 21bac503aa78b1d0cbb6993edc083fbc508dad16 (commit)
via ee826c177bef06f22cdbbf82044085972bfd8737 (commit)
via 4111989bb81641ee36fa94bf5cb181aa18f5477f (commit)
via 8cfa0f76baf92f82bf2865b3557c0a2094e81cb4 (commit)
via bdebd1afa4bf82120c66d9ee8d8cab500ab0b606 (commit)
via 68653f1c822916ceade94511168f87adff74c235 (commit)
via 451086b203ef3e4611487630225a7650ad9322e7 (commit)
via c0c7b21ab57bb9445329fed9e1451c534aab6a67 (commit)
via 59add6ec0f7e96ee81a7b9970228b8f795b01997 (commit)
via f6463fa6e74a32e3fb28f150247e11d0fe073782 (commit)
via 1b421982a6fcadebc72d3d6ee7a4e34eec61a25d (commit)
via 45630ca90e823247c429f82b338244a9bba9baf4 (commit)
via 36c6035855db0ae87a64a0d169e0230d936e3e64 (commit)
via 5ece3fe5e40efbcf7d727650475c35850624cfaf (commit)
via d88becea33630677dbb5123cd72fa8695512311a (commit)
via 171088e69ff96a2e242cfdf98e8d1f0415d4c172 (commit)
via 568a8cc472f3207b44b92428e7ac40338d9ede37 (commit)
via 9a8667331d9a7179331516e7bb1f3aa942bf8218 (commit)
via 4c98f3dc47545794daccd4978103f6b98236ad82 (commit)
via 2dfa0983e4680f321a3d4f1bd0d826abd88f455c (commit)
via ec8fed8c805b513ea15ad76eb380c639dba88548 (commit)
via ce3be84b9f772fda5f08947fec92764119989019 (commit)
via d60bb44c243f27053589b5501529b0001404373f (commit)
via 92dcac243a4a2924bab85d1519a0c7a20853f9cc (commit)
via fb7c63f65c121b372b1ea23a823cb17afdcd1dfd (commit)
via 2bd6dc4ac6ac61705517df297320fa79b308b9e3 (commit)
via 58d6de47f6e189ff0b648b4f2f74e6d5df85d749 (commit)
via 2ecb4add323e3c4ba56641d28e35dd79013ff9cf (commit)
via 540c6a5f5b25d935a8193fd835c1ba83dba02fd5 (commit)
via 507b231626a2e0289288f48b1e4613b569cdd8b2 (commit)
via ea8bdd6cb6894855f109b8d19ce104ae9a4b9cb5 (commit)
via 4a7d63179ae732ede6bdc77c393a1cfd9b0b58ca (commit)
via ba9f03b99b6e1dd46d9b11eb1bac629789c8f94a (commit)
via 6ea996c67dff319e332b465ed450ee50b97de4f7 (commit)
via bbc661e3c38f02b4a1fb50bd4e058a22150b0087 (commit)
via 373a792a4706be2619dd1d1820f949858620bc77 (commit)
via f9b1950752ff1d3041d776a5d50ec2d0ddb8065a (commit)
via d63056e7cff35f58898a9bdc8d5cad589689590c (commit)
via fe8f3314300936f71cc89535ecd3f0f3cad3804c (commit)
via b4ae924f504e9749989059a14e6a5dc830c99e81 (commit)
via 2384bcf387e93435658ec1ab92addbf28c9ab640 (commit)
via 1d314b2544b8af8a936c90e00a0dbbb605410952 (commit)
via 2bb551be853647c25005d1ab167e17ada7a5bfc5 (commit)
via e3c81bd07046903b4b3bff8325024aafcdb35cba (commit)
via 9001f1db99dfff10957dc2a971e7466a496f0f2f (commit)
via 616fb3be8c0b3c266eaf0aa4ae399918fc7992ef (commit)
via 7dd0238dbd4ed086ca7217ec50d8f0a5be3179f3 (commit)
via 7a9a19d6431df02d48a7bc9de44f08d9450d3a37 (commit)
via d72e84456e23ac19c2c12a186ba429cd2e4985cd (commit)
via deefb84c32a289f8deda6550518a48b01a6032c0 (commit)
via 83f8d6de769a33f51b83cd81efe178db162e95e1 (commit)
via db9e3c398b854c83a65eb227ab9ff40dfae1145b (commit)
via 77030a4789285a3f08fbdd9621a384a9e008f4a8 (commit)
via a030033e5a53dd18157509c6c101340688d16011 (commit)
via 13e236a3d647d15858b061c7d96288bf7407e090 (commit)
via a7fe0d5982813f092f8a497d350620c02b995649 (commit)
via 485e0ba7f7fe11e4d28e3eec2be835157521a6e9 (commit)
via 6a55aa002c8f3b701dbb8291cd9a8e21534c6974 (commit)
via 7cdda20613f7ed7b18e7fe210ae0f6a87054dbf3 (commit)
via 745ebcec892cb27feec663de9218ae3647c7b8a5 (commit)
via 1e702fae4c9adbd7134a739dee28c868a15f0b3e (commit)
via 44bd4bc6dc7df56905071933a542e00e91f84837 (commit)
via 006d0fab3f44ec9caa2b23da3866bbbd841cd5d3 (commit)
via 68da925f226966a2760a193e9f9a3cdbdfcfacec (commit)
via 09e8c50958a1fca313c2be427c2991c39798f90f (commit)
via d1b580f1780e5ebdbbf6fe8655cc923fbd5c02de (commit)
via 98e74ad62b23ce33f66e3841431511136bc1c2f8 (commit)
via 0fe4f0151ae7a994aaf305e7985d4ba9f992e482 (commit)
via 9df1f04f8b1f7091ab32dcd56fb6e47e3e96d5a7 (commit)
via 691c232b2655673ac352beafc0bfba4bc966f8f8 (commit)
via 6ad78d124740f1ea18f6f93721ec6f152364e878 (commit)
via 5253640054d48f7816aa00c803f5bc593c0c12c1 (commit)
via ce052cd92cd128ea3db5a8f154bd151956c2920c (commit)
via 6dfeded7b6f2f78a2d45fa54543a5962bdc6c035 (commit)
via 810c79d6d9b8efbc12ec8e1ad727cf002f2dedc6 (commit)
via c74d3b7f393f3934bae22fc9d3a4a49e2211aadb (commit)
via ed6fc7857e3fe7d64f19a0bed27226964009f095 (commit)
via b06a3e2ba1febb9e34458c5106f8d1629a191d5f (commit)
via 56af86bdab9c9700a13cc7d622653d34cbaa72f3 (commit)
via 4cbf309be8a302afe3bc041da11c24b593464157 (commit)
via b3bcd825cfb9c19a62a7db4d12717e85aca0b1e8 (commit)
via 3f5a0900a568436b011fc14b628b71bb130ae5f7 (commit)
via 6df7102965c6afdec6f621175f9e91a56ee42a67 (commit)
via 81613a741bcc9cbe909c814fab9ca99c1a1fc2fd (commit)
via cc004ec0ff327ca300cde89ffc252a9b1c588bec (commit)
via c454dfae8988337bd10bfe0551ee62a267049dfe (commit)
via afde75c1fe9ab3fa35acdf1a3b5f80ec389e1190 (commit)
via b4a1bc9ba28398dbd5fdbe4ee4f118a2faf59efa (commit)
via 3ce7b09732207eac03998fa5e267672760e475c9 (commit)
via d9f4f26b0f2c73eddd07b2a4368ae1b238944b80 (commit)
via 59c8ea50e972e7753c96f6bcf46fec48e694daa2 (commit)
via 0f7dd030eb47912112b8774424a62c5561af16a1 (commit)
via fb441884baa9994093ed380aded84e707c3d34b5 (commit)
via 6f5ca0bd47ff6a9b1670f38d6a68a1a7b1a01a5c (commit)
via ee552335b8177318be98e6a4c5d941aa41091a2f (commit)
via edbcbf0ab15f140b96efab5fae808b35e705cf67 (commit)
via c4131b7a0c4a6d666a35847f8cce3d099b7a9949 (commit)
via f3e53fe5cba59946ddcf24be423eece1ab596769 (commit)
via a51d6b87331f0fc991b9926a9101e081668ebbcb (commit)
via 253d1fc351fffc8a0b1d325044854a2defdd7223 (commit)
via d7834356a301b162fb9757427359d0dbac95cecf (commit)
via 39a0a5c65d0802f40ab428474b1e6d981a91fbce (commit)
via 0c9db8bbeb7187218a5b47d82df18e38128d06a3 (commit)
via 9882d600d0bbbc115671b12646e690ccddbf5348 (commit)
via 59b545e90d30444a97c8e925569d240c819d42b4 (commit)
via 7e89c625c5d12b5816c857d0c0910922f8803f82 (commit)
via b9f87e9332895be6915e2f2960a2e921375e8e7f (commit)
via 978ae99ac4aa211ba4ba960f56bb6cdd84b648ae (commit)
via d71b7da05d3e1a82047e35c2720c759bdc0fb44f (commit)
via a577b387b7e5c9c8afd371767fccc85009e84485 (commit)
via 8e82cd7374cda9ef55f88504a94d31b06d7e1bd4 (commit)
via 1351cb42c92cd415003adf6234d96507c8a6d2db (commit)
via 575bd3ec2fe918257cb448eee8ebbff269d85431 (commit)
via 51a7361aef92b8c6caad857ed09f0bea0f210db6 (commit)
via e021b84f7fc20b3e3927093ed87e9c873d33a443 (commit)
via b51f0cfcedc2499aa1c0b85aaebf2fecf244c291 (commit)
via 62432e71ef943744fd4ca9ce216da1b0a7250573 (commit)
via 005c77dfe53b54cef92ce51d91f615eb9c2769c4 (commit)
via ecffd3a7f26c9a1590994bb176494ed4f4ca7a64 (commit)
via ce3bc8504d765ecc9b453398efb18662bd4f277a (commit)
via 94fc6d8d303053c47064c9408947cd49a8e11975 (commit)
via 0edf51aca0ea2d75ed9d96fb612c1005965ec64f (commit)
via ba292defc14029971d5e9043881ddb98c994cfdb (commit)
via c5cf3cc081042fec0e2baea7cdf7f22a8a84664a (commit)
via adcbbb141bdb09a6fd999f3369e15c2881f843ba (commit)
via 80014655d76e758868e8e1ed36472be9a606eb2a (commit)
via b22882ae78f0e5d38d4b6ace0725bf0ae5bc4803 (commit)
via f83a47d3c260982be4918a3d9f5d0b480503b131 (commit)
via 8c84a6865c4b09eccf41c9d2e91a030c941bffab (commit)
via a9ddfa91f81e00400f04548e71ab9519892a6dea (commit)
via 04749187843604f51ddcab4f53811dac9a9ed8a0 (commit)
via 89bd1bf64a6d745f4276fce3ee8fa4e050736ff1 (commit)
via f429202995ebb0dbc86d41c6d707815186832063 (commit)
via f14bc0502c3c4d2ffd609b110771ca1fa752b68e (commit)
via f75d5bd488669426794d086b80568ef0a7a4afe6 (commit)
via c7db1351d3b1c25bfc31ed9e7b6b491e6bcb1555 (commit)
via 67b352b3f7cf736c9aa7c1332aa7814911556ad5 (commit)
via 822a00aee0d7feb845e28dad7dccb552d10d83db (commit)
via c293f639684d2c6625b7395c995aa813eafa5fa4 (commit)
via 00686a614cca93f007335d01c06d78cfd212d973 (commit)
via a53c7d7c450de09ceb04b47cb59450225827bd51 (commit)
via 5b7dee0548f068e626c0bf5d116fc506d2af92a0 (commit)
via 7990857c32cbb49f4bedf805f86c1b718b3a70d0 (commit)
via a03d7d9aae8ac258d266c66c62c63e03ff5d2558 (commit)
via 5d6fde4aa0d2a93945276dd722be48e05da72faf (commit)
via d14895917e4841ee53c46f7ab3f46c3f19489069 (commit)
via eb5023d2a38e0862e2d9a5f1ca4a3788fc131405 (commit)
via 4102716ab6f3cfaa979151029c2859701dfe2ac6 (commit)
via 8975d286a6de827a02b073de32570602cd9cffbc (commit)
via 19c8c07e6e1601180f85f7aad145f00112f3f8a4 (commit)
via 87090907f39983b744749017cdac3fb957d8d0c0 (commit)
via 2808941eebec54dc7c4981f5a2a0e149d452b8ca (commit)
via de06b256b36f6428c5d914266c4e91c25c69ded5 (commit)
via d4867b8dd18ddbee0b30040f569eeac99964343f (commit)
via b5347a6b22c2d82ffa57c8302c81ee0f25b413a1 (commit)
via 848cfc635084c5baccb275ed4995032d3ada2d59 (commit)
via 52357dbe51bd015119a798a4f8e7244a3e1efda4 (commit)
via cce2a00af57ef823abeaeff787eff35f43dfb093 (commit)
via 07cd1647921e0e94432cecb2f7a5413cd8f3884e (commit)
via efea6557fd364ee42c84c08df28efa9797f1c9c8 (commit)
via b2cab7978ff20eff1d4fcb4cf60fc8a4421fc24c (commit)
via 4db53f3593e24b80a33b608432ef463acbec295e (commit)
via 009d45dfbb20a54ea402e7e8f18bc2d253f41ad6 (commit)
via f1d52ff7171da920acc7583fa427a95386312908 (commit)
via f33ffa77fdcc3e40ec42268ea09b67ac65982f1f (commit)
via ac08c1b86b979574678aa110f19fb744719def21 (commit)
via f17363ea38564867df555b6be9138d2eff28daa0 (commit)
via f4620596bd798f3c0e1d4b7738a5c4ca1730cf89 (commit)
via e89a3a1302cd3e95403c5c64edb126153852ff35 (commit)
via 525d9602da83a5d8ddbfc9ebda282209aa743a70 (commit)
via c6dc0f2d6f67d69d32e7f8c3c175d79f4b2ef430 (commit)
via 85b53414c2c8f70e541447ee204e004693289956 (commit)
via 6c3401b4a9fb79bdee7484e1e3c05758d1b0c0ca (commit)
via a5cf5c7b3a6ac9be60a8737f0e36a61897d32acd (commit)
via 734cae300ccd13aacec1f32b283d4d21b5de8fb5 (commit)
via 07708b4325680c4731f0d3dc24bca9da3c962d80 (commit)
via b4007e4b25d21ba3b693674ca19ead7d202b7de0 (commit)
via 600d77cc8af4625a30dceda2033c4aadbbbe71ff (commit)
via 3b1a604abf5709bfda7271fa94213f7d823de69d (commit)
via 0caae46ef006c8322d489d6b140c0aee91928803 (commit)
via 688d0a641d4fa7a018fb4f9e131ed1454c68dd15 (commit)
via c136060da6a43da5db7e45b6a32da83f0f7d0820 (commit)
via 70d50df5bc495661463ff19885b9a4112270bafa (commit)
via d3ef96824420d7f089b28e6521790191e39949bf (commit)
from 71488ea628a1d791eeba41cb2eed3025c6311565 (commit)
Those revisions listed above that are new to this repository have
not appeared on any other notification email; so we list those
revisions in full, below.
- Log -----------------------------------------------------------------
commit 5220fc55e92504899d102b5704382382a4e450c1
Merge: d76b95edce86919636ee0e458f0b9def08a9d2ea 71488ea628a1d791eeba41cb2eed3025c6311565
Author: Dima Volodin <dvv at isc.org>
Date: Fri Oct 7 19:24:44 2011 -0400
Merge branch 'trac1140' of ssh://git.bind10.isc.org/var/bind10/git/bind10 into trac1140
Conflicts:
src/lib/dns/tests/rdata_txt_like_unittest.cc
commit d76b95edce86919636ee0e458f0b9def08a9d2ea
Author: Dima Volodin <dvv at isc.org>
Date: Fri Oct 7 12:06:18 2011 -0400
[1140] comparison test added in rdata_txt_like_unittest.cc
commit d4405856fd2e088fbc7cd4caa9b2e9a6c66e8e83
Author: Dima Volodin <dvv at isc.org>
Date: Tue Oct 4 08:43:40 2011 -0400
[1140] style cleanup in rdata_txt_like_unittest.cc
assignment test added in rdata_txt_like_unittest.cc
commit 99fbf7cc5eacc8c0ec65a19a1eb83b4e0a329cd1
Author: Dima Volodin <dvv at isc.org>
Date: Tue Sep 13 08:40:14 2011 -0400
[1140] tabs in rdata_txt_like_unittest.cc expanded
commit ff4a86af49e629a83e40f35c78c7616a208659c4
Author: Dima Volodin <dvv at isc.org>
Date: Mon Sep 12 14:45:02 2011 -0400
[1140] txt&spf unittests redone using typed gtest
commit 47ea557d9d8a9782e4d576c45c545913bbaac4ea
Author: Dima Volodin <dvv at isc.org>
Date: Fri Sep 9 09:48:47 2011 -0400
[1140] SPF test module added
commit 006133b49eb5d44eeacb1d79593b97ae2212bbca
Author: Dima Volodin <dvv at isc.org>
Date: Fri Sep 9 09:42:00 2011 -0400
[1140] tests for SPF and TXT using common source
commit 261656de6d4fbe2f6979162c978023f8859d2289
Author: Dima Volodin <dvv at isc.org>
Date: Tue Aug 23 09:50:14 2011 -0400
[1140] more typos in rdata/generic/detail/txt_like.h
commit 419768f653e098ab566c9f94771552e2bfe3cc99
Author: Dima Volodin <dvv at isc.org>
Date: Tue Aug 23 08:42:57 2011 -0400
[1140] typo on rdata/generic/detail/txt_like.h doxygen
commit affa93f31bbc9719ac4e2ccc0e44d9a09c2b6a3b
Author: Dima Volodin <dvv at isc.org>
Date: Mon Aug 22 08:14:11 2011 -0400
[1140] Documentation added to spf_99.{h,cc} and detail/txt_like.h
-----------------------------------------------------------------------
Summary of changes:
ChangeLog | 112 +-
Makefile.am | 4 +
configure.ac | 38 +-
doc/Doxyfile | 6 +-
doc/guide/bind10-guide.html | 595 +++-
doc/guide/bind10-guide.xml | 155 +-
doc/guide/bind10-messages.html | 2028 +++++++++--
doc/guide/bind10-messages.xml | 3790 +++++++++++++++++---
src/bin/auth/Makefile.am | 6 +
src/bin/auth/auth_messages.mes | 3 +
src/bin/auth/auth_srv.cc | 24 +
src/bin/auth/b10-auth.8 | 47 +-
src/bin/auth/b10-auth.xml | 48 +-
src/bin/auth/benchmarks/Makefile.am | 6 +
src/bin/auth/query.cc | 45 +-
src/bin/auth/query.h | 10 +-
src/bin/auth/statistics.cc | 32 +-
src/bin/auth/statistics.h | 20 +
src/bin/auth/tests/Makefile.am | 7 +
src/bin/auth/tests/query_unittest.cc | 106 +-
src/bin/auth/tests/statistics_unittest.cc | 74 +-
src/bin/bind10/Makefile.am | 17 +-
src/bin/bind10/bind10.8 | 16 +-
src/bin/bind10/bind10.xml | 28 +-
src/bin/bind10/bind10_messages.mes | 4 +
src/bin/bind10/bind10_src.py.in | 36 +-
src/bin/bind10/run_bind10.sh.in | 4 +-
src/bin/bind10/tests/Makefile.am | 6 +-
src/bin/bind10/tests/bind10_test.py.in | 30 +-
src/bin/bindctl/Makefile.am | 2 +
src/bin/bindctl/run_bindctl.sh.in | 4 +-
src/bin/bindctl/tests/Makefile.am | 4 +-
src/bin/cfgmgr/b10-cfgmgr.py.in | 2 +-
src/bin/cfgmgr/plugins/tests/Makefile.am | 6 +-
src/bin/cfgmgr/tests/Makefile.am | 10 +-
src/bin/cmdctl/Makefile.am | 15 +-
src/bin/cmdctl/cmdctl.py.in | 2 +-
src/bin/cmdctl/run_b10-cmdctl.sh.in | 10 +-
src/bin/cmdctl/tests/Makefile.am | 4 +-
src/bin/dhcp6/Makefile.am | 9 +-
src/bin/dhcp6/b10-dhcp6.8 | 4 +-
src/bin/dhcp6/dhcp6.h | 73 +-
src/bin/dhcp6/dhcp6_srv.cc | 55 +
src/bin/dhcp6/dhcp6_srv.h | 40 +
src/bin/dhcp6/iface_mgr.cc | 581 +++
src/bin/dhcp6/iface_mgr.h | 103 +
src/bin/dhcp6/interfaces.txt | 10 +
src/bin/dhcp6/main.cc | 36 +-
src/bin/dhcp6/pkt6.cc | 46 +
src/bin/dhcp6/pkt6.h | 62 +
src/bin/dhcp6/tests/Makefile.am | 49 +-
src/bin/dhcp6/tests/dhcp6_srv_unittest.cc | 53 +
src/bin/dhcp6/tests/dhcp6_unittests.cc | 28 +
src/bin/dhcp6/tests/iface_mgr_unittest.cc | 262 ++
src/bin/dhcp6/tests/pkt6_unittest.cc | 44 +
src/bin/host/b10-host.1 | 4 -
src/bin/host/b10-host.xml | 5 -
src/bin/loadzone/Makefile.am | 1 +
src/bin/loadzone/run_loadzone.sh.in | 4 +-
src/bin/loadzone/tests/correct/Makefile.am | 4 +-
src/bin/loadzone/tests/correct/correct_test.sh.in | 2 +-
src/bin/loadzone/tests/error/Makefile.am | 4 +-
src/bin/loadzone/tests/error/error_test.sh.in | 2 +-
src/bin/msgq/tests/Makefile.am | 4 +-
src/bin/resolver/b10-resolver.8 | 30 +-
src/bin/resolver/b10-resolver.xml | 32 +-
src/bin/stats/Makefile.am | 29 +-
src/bin/stats/b10-stats-httpd.8 | 6 +-
src/bin/stats/b10-stats-httpd.xml | 10 +-
src/bin/stats/b10-stats.8 | 103 +-
src/bin/stats/b10-stats.xml | 130 +-
src/bin/stats/stats-httpd-xsl.tpl | 1 +
src/bin/stats/stats-schema.spec | 86 -
src/bin/stats/stats.py.in | 593 ++--
src/bin/stats/stats.spec | 71 +-
src/bin/stats/stats_httpd.py.in | 282 +-
src/bin/stats/stats_messages.mes | 21 +-
src/bin/stats/tests/Makefile.am | 12 +-
src/bin/stats/tests/b10-stats-httpd_test.py | 782 +++--
src/bin/stats/tests/b10-stats_test.py | 1196 +++----
src/bin/stats/tests/fake_select.py | 43 -
src/bin/stats/tests/fake_socket.py | 70 -
src/bin/stats/tests/fake_time.py | 47 -
src/bin/stats/tests/http/Makefile.am | 6 -
src/bin/stats/tests/http/server.py | 96 -
src/bin/stats/tests/isc/Makefile.am | 8 -
src/bin/stats/tests/isc/cc/Makefile.am | 7 -
src/bin/stats/tests/isc/cc/__init__.py | 1 -
src/bin/stats/tests/isc/cc/session.py | 148 -
src/bin/stats/tests/isc/config/Makefile.am | 7 -
src/bin/stats/tests/isc/config/__init__.py | 1 -
src/bin/stats/tests/isc/config/ccsession.py | 249 --
src/bin/stats/tests/isc/log/Makefile.am | 7 -
src/bin/stats/tests/isc/log/__init__.py | 33 -
src/bin/stats/tests/isc/log_messages/Makefile.am | 7 +
src/bin/stats/tests/isc/log_messages/__init__.py | 18 +
.../tests/isc/log_messages/stats_httpd_messages.py | 16 +
.../stats/tests/isc/log_messages/stats_messages.py | 16 +
src/bin/stats/tests/isc/util/Makefile.am | 7 -
src/bin/stats/tests/isc/util/process.py | 21 -
src/bin/stats/tests/test_utils.py | 364 ++
src/bin/stats/tests/testdata/Makefile.am | 1 -
src/bin/stats/tests/testdata/stats_test.spec | 19 -
src/bin/tests/Makefile.am | 7 +-
src/bin/xfrin/Makefile.am | 15 +-
src/bin/xfrin/b10-xfrin.8 | 11 +-
src/bin/xfrin/b10-xfrin.xml | 7 +-
src/bin/xfrin/tests/Makefile.am | 8 +-
src/bin/xfrin/tests/testdata/Makefile.am | 2 +
src/bin/xfrin/tests/testdata/example.com | 17 +
src/bin/xfrin/tests/testdata/example.com.sqlite3 | Bin 0 -> 11264 bytes
src/bin/xfrin/tests/xfrin_test.py | 998 +++++-
src/bin/xfrin/xfrin.py.in | 543 +++-
src/bin/xfrin/xfrin.spec | 5 +
src/bin/xfrin/xfrin_messages.mes | 30 +-
src/bin/xfrout/Makefile.am | 15 +-
src/bin/xfrout/b10-xfrout.xml | 8 +
src/bin/xfrout/tests/Makefile.am | 10 +-
src/bin/xfrout/tests/xfrout_test.py.in | 197 +-
src/bin/xfrout/xfrout.py.in | 184 +-
src/bin/xfrout/xfrout.spec.pre.in | 41 +-
src/bin/xfrout/xfrout_messages.mes | 11 +
src/bin/zonemgr/Makefile.am | 14 +-
src/bin/zonemgr/tests/Makefile.am | 4 +-
src/bin/zonemgr/tests/zonemgr_test.py | 65 +-
src/bin/zonemgr/zonemgr.py.in | 47 +-
src/cppcheck-suppress.lst | 2 +-
src/lib/Makefile.am | 6 +-
src/lib/asiolink/io_address.cc | 5 +
src/lib/asiolink/io_address.h | 13 +
src/lib/asiolink/tests/io_address_unittest.cc | 3 +
src/lib/asiolink/tests/io_endpoint_unittest.cc | 2 +-
src/lib/cache/cache_messages.mes | 4 +-
src/lib/datasrc/Makefile.am | 15 +-
src/lib/datasrc/client.h | 144 +-
src/lib/datasrc/data_source.h | 12 +-
src/lib/datasrc/database.cc | 888 ++++-
src/lib/datasrc/database.h | 600 +++-
src/lib/datasrc/datasrc_messages.mes | 115 +-
src/lib/datasrc/factory.cc | 82 +
src/lib/datasrc/factory.h | 182 +
src/lib/datasrc/iterator.h | 61 +
src/lib/datasrc/memory_datasrc.cc | 287 ++-
src/lib/datasrc/memory_datasrc.h | 54 +-
src/lib/datasrc/sqlite3_accessor.cc | 793 +++--
src/lib/datasrc/sqlite3_accessor.h | 141 +-
src/lib/datasrc/sqlite3_datasrc.cc | 95 +-
src/lib/datasrc/static_datasrc.cc | 1 +
src/lib/datasrc/tests/Makefile.am | 23 +-
src/lib/datasrc/tests/cache_unittest.cc | 6 +-
src/lib/datasrc/tests/client_unittest.cc | 50 +
src/lib/datasrc/tests/database_unittest.cc | 2667 ++++++++++++---
src/lib/datasrc/tests/factory_unittest.cc | 175 +
src/lib/datasrc/tests/memory_datasrc_unittest.cc | 53 +-
src/lib/datasrc/tests/sqlite3_accessor_unittest.cc | 714 ++++-
src/lib/datasrc/tests/static_unittest.cc | 1 +
src/lib/datasrc/tests/testdata/Makefile.am | 6 +
src/lib/datasrc/tests/testdata/rwtest.sqlite3 | Bin 0 -> 11264 bytes
src/lib/datasrc/zone.h | 368 ++-
src/lib/dns/Makefile.am | 15 +
src/lib/dns/character_string.cc | 140 +
src/lib/dns/character_string.h | 57 +
src/lib/dns/gen-rdatacode.py.in | 17 +-
src/lib/dns/message.cc | 43 +-
src/lib/dns/message.h | 55 +-
src/lib/dns/python/Makefile.am | 53 +-
src/lib/dns/python/edns_python.cc | 262 +-
src/lib/dns/python/edns_python.h | 64 +
src/lib/dns/python/message_python.cc | 541 ++--
src/lib/dns/python/message_python.h | 40 +
src/lib/dns/python/message_python_inc.cc | 41 +
src/lib/dns/python/messagerenderer_python.cc | 94 +-
src/lib/dns/python/messagerenderer_python.h | 37 +-
src/lib/dns/python/name_python.cc | 133 +-
src/lib/dns/python/name_python.h | 45 +-
src/lib/dns/python/opcode_python.cc | 231 +-
src/lib/dns/python/opcode_python.h | 64 +
src/lib/dns/python/pydnspp.cc | 716 ++++-
src/lib/dns/python/pydnspp_common.cc | 36 +
src/lib/dns/python/pydnspp_common.h | 2 -
src/lib/dns/python/pydnspp_towire.h | 4 +-
src/lib/dns/python/question_python.cc | 271 +-
src/lib/dns/python/question_python.h | 66 +
src/lib/dns/python/rcode_python.cc | 109 +-
src/lib/dns/python/rcode_python.h | 49 +-
src/lib/dns/python/rdata_python.cc | 289 +-
src/lib/dns/python/rdata_python.h | 68 +
src/lib/dns/python/rrclass_python.cc | 303 +-
src/lib/dns/python/rrclass_python.h | 68 +
src/lib/dns/python/rrset_python.cc | 494 ++--
src/lib/dns/python/rrset_python.h | 78 +
src/lib/dns/python/rrttl_python.cc | 281 +-
src/lib/dns/python/rrttl_python.h | 67 +
src/lib/dns/python/rrtype_python.cc | 348 +-
src/lib/dns/python/rrtype_python.h | 68 +
src/lib/dns/python/tests/Makefile.am | 2 +-
src/lib/dns/python/tests/message_python_test.py | 52 +-
src/lib/dns/python/tsig_python.cc | 105 +-
src/lib/dns/python/tsig_python.h | 28 +-
src/lib/dns/python/tsig_rdata_python.cc | 62 +-
src/lib/dns/python/tsig_rdata_python.h | 29 +-
src/lib/dns/python/tsigerror_python.cc | 105 +-
src/lib/dns/python/tsigerror_python.h | 10 +-
src/lib/dns/python/tsigkey_python.cc | 133 +-
src/lib/dns/python/tsigkey_python.h | 52 +-
src/lib/dns/python/tsigrecord_python.cc | 82 +-
src/lib/dns/python/tsigrecord_python.h | 28 +-
src/lib/dns/rdata/any_255/tsig_250.cc | 1 +
src/lib/dns/rdata/generic/afsdb_18.cc | 171 +
src/lib/dns/rdata/generic/afsdb_18.h | 74 +
src/lib/dns/rdata/generic/detail/ds_like.h | 225 ++
src/lib/dns/rdata/generic/detail/txt_like.h | 7 +-
src/lib/dns/rdata/generic/dlv_32769.cc | 121 +
src/lib/dns/rdata/generic/dlv_32769.h | 77 +
src/lib/dns/rdata/generic/ds_43.cc | 109 +-
src/lib/dns/rdata/generic/ds_43.h | 33 +-
src/lib/dns/rdata/generic/hinfo_13.cc | 129 +
src/lib/dns/rdata/generic/hinfo_13.h | 77 +
src/lib/dns/rdata/generic/minfo_14.cc | 156 +
src/lib/dns/rdata/generic/minfo_14.h | 82 +
src/lib/dns/rdata/generic/naptr_35.cc | 220 ++
src/lib/dns/rdata/generic/naptr_35.h | 83 +
src/lib/dns/rdata/generic/rp_17.cc | 1 +
src/lib/dns/rdata/generic/rrsig_46.cc | 2 +-
src/lib/dns/rdata/generic/rrsig_46.h | 2 +-
src/lib/dns/rdata/in_1/dhcid_49.cc | 145 +
src/lib/dns/rdata/in_1/dhcid_49.h | 58 +
src/lib/dns/rdata/in_1/srv_33.h | 4 +-
src/lib/dns/rdata/template.cc | 1 +
src/lib/dns/tests/Makefile.am | 7 +-
src/lib/dns/tests/character_string_unittest.cc | 92 +
src/lib/dns/tests/message_unittest.cc | 131 +-
src/lib/dns/tests/rdata_afsdb_unittest.cc | 210 ++
src/lib/dns/tests/rdata_ds_like_unittest.cc | 171 +
src/lib/dns/tests/rdata_ds_unittest.cc | 99 -
src/lib/dns/tests/rdata_hinfo_unittest.cc | 115 +
src/lib/dns/tests/rdata_minfo_unittest.cc | 184 +
src/lib/dns/tests/rdata_naptr_unittest.cc | 178 +
src/lib/dns/tests/rdata_txt_like_unittest.cc | 35 +
src/lib/dns/tests/testdata/Makefile.am | 26 +-
src/lib/dns/tests/testdata/message_fromWire19.spec | 20 +
src/lib/dns/tests/testdata/message_fromWire20.spec | 20 +
src/lib/dns/tests/testdata/message_fromWire21.spec | 20 +
src/lib/dns/tests/testdata/message_fromWire22.spec | 14 +
.../dns/tests/testdata/rdata_afsdb_fromWire1.spec | 3 +
.../dns/tests/testdata/rdata_afsdb_fromWire2.spec | 6 +
.../dns/tests/testdata/rdata_afsdb_fromWire3.spec | 4 +
.../dns/tests/testdata/rdata_afsdb_fromWire4.spec | 4 +
.../dns/tests/testdata/rdata_afsdb_fromWire5.spec | 4 +
.../dns/tests/testdata/rdata_afsdb_toWire1.spec | 4 +
.../dns/tests/testdata/rdata_afsdb_toWire2.spec | 8 +
.../dns/tests/testdata/rdata_minfo_fromWire1.spec | 3 +
.../dns/tests/testdata/rdata_minfo_fromWire2.spec | 7 +
.../dns/tests/testdata/rdata_minfo_fromWire3.spec | 6 +
.../dns/tests/testdata/rdata_minfo_fromWire4.spec | 6 +
.../dns/tests/testdata/rdata_minfo_fromWire5.spec | 5 +
.../dns/tests/testdata/rdata_minfo_fromWire6.spec | 5 +
.../dns/tests/testdata/rdata_minfo_toWire1.spec | 5 +
.../dns/tests/testdata/rdata_minfo_toWire2.spec | 6 +
.../testdata/rdata_minfo_toWireUncompressed1.spec | 7 +
.../testdata/rdata_minfo_toWireUncompressed2.spec | 8 +
src/lib/exceptions/exceptions.h | 12 +
src/lib/python/isc/Makefile.am | 3 +-
src/lib/python/isc/__init__.py | 7 +-
src/lib/python/isc/acl/tests/Makefile.am | 4 +-
src/lib/python/isc/bind10/sockcreator.py | 2 +-
src/lib/python/isc/bind10/tests/Makefile.am | 4 +-
src/lib/python/isc/cc/tests/Makefile.am | 4 +-
src/lib/python/isc/config/Makefile.am | 32 +-
src/lib/python/isc/config/ccsession.py | 2 +-
src/lib/python/isc/config/cfgmgr.py | 2 +-
src/lib/python/isc/config/tests/Makefile.am | 4 +-
src/lib/python/isc/datasrc/Makefile.am | 36 +-
src/lib/python/isc/datasrc/__init__.py | 21 +-
src/lib/python/isc/datasrc/client_inc.cc | 157 +
src/lib/python/isc/datasrc/client_python.cc | 264 ++
src/lib/python/isc/datasrc/client_python.h | 35 +
src/lib/python/isc/datasrc/datasrc.cc | 225 ++
src/lib/python/isc/datasrc/datasrc.h | 50 +
src/lib/python/isc/datasrc/finder_inc.cc | 96 +
src/lib/python/isc/datasrc/finder_python.cc | 248 ++
src/lib/python/isc/datasrc/finder_python.h | 36 +
src/lib/python/isc/datasrc/iterator_inc.cc | 34 +
src/lib/python/isc/datasrc/iterator_python.cc | 202 ++
src/lib/python/isc/datasrc/iterator_python.h | 38 +
src/lib/python/isc/datasrc/sqlite3_ds.py | 84 +-
src/lib/python/isc/datasrc/tests/Makefile.am | 10 +-
src/lib/python/isc/datasrc/tests/datasrc_test.py | 389 ++
.../python/isc/datasrc/tests/sqlite3_ds_test.py | 50 +-
src/lib/python/isc/datasrc/updater_inc.cc | 181 +
src/lib/python/isc/datasrc/updater_python.cc | 318 ++
src/lib/python/isc/datasrc/updater_python.h | 39 +
src/lib/python/isc/dns/Makefile.am | 7 +
src/lib/python/isc/log/log.cc | 2 +-
src/lib/python/isc/log/tests/Makefile.am | 24 +-
src/lib/python/isc/log_messages/Makefile.am | 32 +
src/lib/python/isc/log_messages/README | 68 +
src/lib/python/isc/log_messages/__init__.py | 3 +
src/lib/python/isc/log_messages/bind10_messages.py | 1 +
src/lib/python/isc/log_messages/cfgmgr_messages.py | 1 +
src/lib/python/isc/log_messages/cmdctl_messages.py | 1 +
src/lib/python/isc/log_messages/config_messages.py | 1 +
src/lib/python/isc/log_messages/gen-forwarder.sh | 14 +
.../python/isc/log_messages/libxfrin_messages.py | 1 +
.../python/isc/log_messages/notify_out_messages.py | 1 +
.../isc/log_messages/stats_httpd_messages.py | 1 +
src/lib/python/isc/log_messages/stats_messages.py | 1 +
src/lib/python/isc/log_messages/work/Makefile.am | 12 +
.../python/isc/log_messages/work/__init__.py.in | 3 +
src/lib/python/isc/log_messages/xfrin_messages.py | 1 +
src/lib/python/isc/log_messages/xfrout_messages.py | 1 +
.../python/isc/log_messages/zonemgr_messages.py | 1 +
src/lib/python/isc/net/tests/Makefile.am | 4 +-
src/lib/python/isc/notify/Makefile.am | 17 +-
src/lib/python/isc/notify/notify_out.py | 2 +-
src/lib/python/isc/notify/tests/Makefile.am | 4 +-
src/lib/python/isc/util/tests/Makefile.am | 4 +-
src/lib/python/isc/xfrin/Makefile.am | 23 +
.../http => lib/python/isc/xfrin}/__init__.py | 0
src/lib/python/isc/xfrin/diff.py | 237 ++
src/lib/python/isc/xfrin/libxfrin_messages.mes | 21 +
src/lib/python/isc/xfrin/tests/Makefile.am | 24 +
src/lib/python/isc/xfrin/tests/diff_tests.py | 446 +++
src/lib/util/python/gen_wiredata.py.in | 43 +
src/lib/util/python/pycppwrapper_util.h | 2 +-
src/lib/util/python/wrapper_template.cc | 4 +-
src/lib/util/python/wrapper_template.h | 6 +-
tests/system/bindctl/tests.sh | 16 +-
tests/system/cleanall.sh | 5 +-
329 files changed, 28463 insertions(+), 7512 deletions(-)
create mode 100644 src/bin/dhcp6/dhcp6_srv.cc
create mode 100644 src/bin/dhcp6/dhcp6_srv.h
create mode 100644 src/bin/dhcp6/iface_mgr.cc
create mode 100644 src/bin/dhcp6/iface_mgr.h
create mode 100644 src/bin/dhcp6/interfaces.txt
create mode 100644 src/bin/dhcp6/pkt6.cc
create mode 100644 src/bin/dhcp6/pkt6.h
create mode 100644 src/bin/dhcp6/tests/dhcp6_srv_unittest.cc
create mode 100644 src/bin/dhcp6/tests/dhcp6_unittests.cc
create mode 100644 src/bin/dhcp6/tests/iface_mgr_unittest.cc
create mode 100644 src/bin/dhcp6/tests/pkt6_unittest.cc
mode change 100644 => 100755 src/bin/loadzone/tests/correct/correct_test.sh.in
mode change 100644 => 100755 src/bin/loadzone/tests/error/error_test.sh.in
delete mode 100644 src/bin/stats/stats-schema.spec
mode change 100644 => 100755 src/bin/stats/stats.py.in
mode change 100755 => 100644 src/bin/stats/stats_httpd.py.in
delete mode 100644 src/bin/stats/tests/fake_select.py
delete mode 100644 src/bin/stats/tests/fake_socket.py
delete mode 100644 src/bin/stats/tests/fake_time.py
delete mode 100644 src/bin/stats/tests/http/Makefile.am
delete mode 100644 src/bin/stats/tests/http/server.py
delete mode 100644 src/bin/stats/tests/isc/Makefile.am
delete mode 100644 src/bin/stats/tests/isc/__init__.py
delete mode 100644 src/bin/stats/tests/isc/cc/Makefile.am
delete mode 100644 src/bin/stats/tests/isc/cc/__init__.py
delete mode 100644 src/bin/stats/tests/isc/cc/session.py
delete mode 100644 src/bin/stats/tests/isc/config/Makefile.am
delete mode 100644 src/bin/stats/tests/isc/config/__init__.py
delete mode 100644 src/bin/stats/tests/isc/config/ccsession.py
delete mode 100644 src/bin/stats/tests/isc/log/Makefile.am
delete mode 100644 src/bin/stats/tests/isc/log/__init__.py
create mode 100644 src/bin/stats/tests/isc/log_messages/Makefile.am
create mode 100644 src/bin/stats/tests/isc/log_messages/__init__.py
create mode 100644 src/bin/stats/tests/isc/log_messages/stats_httpd_messages.py
create mode 100644 src/bin/stats/tests/isc/log_messages/stats_messages.py
delete mode 100644 src/bin/stats/tests/isc/util/Makefile.am
delete mode 100644 src/bin/stats/tests/isc/util/__init__.py
delete mode 100644 src/bin/stats/tests/isc/util/process.py
create mode 100644 src/bin/stats/tests/test_utils.py
delete mode 100644 src/bin/stats/tests/testdata/Makefile.am
delete mode 100644 src/bin/stats/tests/testdata/stats_test.spec
create mode 100644 src/bin/xfrin/tests/testdata/Makefile.am
create mode 100644 src/bin/xfrin/tests/testdata/example.com
create mode 100644 src/bin/xfrin/tests/testdata/example.com.sqlite3
create mode 100644 src/lib/datasrc/factory.cc
create mode 100644 src/lib/datasrc/factory.h
create mode 100644 src/lib/datasrc/iterator.h
create mode 100644 src/lib/datasrc/tests/client_unittest.cc
create mode 100644 src/lib/datasrc/tests/factory_unittest.cc
create mode 100644 src/lib/datasrc/tests/testdata/Makefile.am
create mode 100644 src/lib/datasrc/tests/testdata/rwtest.sqlite3
create mode 100644 src/lib/dns/character_string.cc
create mode 100644 src/lib/dns/character_string.h
create mode 100644 src/lib/dns/python/edns_python.h
create mode 100644 src/lib/dns/python/message_python.h
create mode 100644 src/lib/dns/python/message_python_inc.cc
create mode 100644 src/lib/dns/python/opcode_python.h
create mode 100644 src/lib/dns/python/question_python.h
create mode 100644 src/lib/dns/python/rdata_python.h
create mode 100644 src/lib/dns/python/rrclass_python.h
create mode 100644 src/lib/dns/python/rrset_python.h
create mode 100644 src/lib/dns/python/rrttl_python.h
create mode 100644 src/lib/dns/python/rrtype_python.h
create mode 100644 src/lib/dns/rdata/generic/afsdb_18.cc
create mode 100644 src/lib/dns/rdata/generic/afsdb_18.h
create mode 100644 src/lib/dns/rdata/generic/detail/ds_like.h
create mode 100644 src/lib/dns/rdata/generic/dlv_32769.cc
create mode 100644 src/lib/dns/rdata/generic/dlv_32769.h
create mode 100644 src/lib/dns/rdata/generic/hinfo_13.cc
create mode 100644 src/lib/dns/rdata/generic/hinfo_13.h
create mode 100644 src/lib/dns/rdata/generic/minfo_14.cc
create mode 100644 src/lib/dns/rdata/generic/minfo_14.h
create mode 100644 src/lib/dns/rdata/generic/naptr_35.cc
create mode 100644 src/lib/dns/rdata/generic/naptr_35.h
create mode 100644 src/lib/dns/rdata/in_1/dhcid_49.cc
create mode 100644 src/lib/dns/rdata/in_1/dhcid_49.h
create mode 100644 src/lib/dns/tests/character_string_unittest.cc
create mode 100644 src/lib/dns/tests/rdata_afsdb_unittest.cc
create mode 100644 src/lib/dns/tests/rdata_ds_like_unittest.cc
delete mode 100644 src/lib/dns/tests/rdata_ds_unittest.cc
create mode 100644 src/lib/dns/tests/rdata_hinfo_unittest.cc
create mode 100644 src/lib/dns/tests/rdata_minfo_unittest.cc
create mode 100644 src/lib/dns/tests/rdata_naptr_unittest.cc
create mode 100644 src/lib/dns/tests/testdata/message_fromWire19.spec
create mode 100644 src/lib/dns/tests/testdata/message_fromWire20.spec
create mode 100644 src/lib/dns/tests/testdata/message_fromWire21.spec
create mode 100644 src/lib/dns/tests/testdata/message_fromWire22.spec
create mode 100644 src/lib/dns/tests/testdata/rdata_afsdb_fromWire1.spec
create mode 100644 src/lib/dns/tests/testdata/rdata_afsdb_fromWire2.spec
create mode 100644 src/lib/dns/tests/testdata/rdata_afsdb_fromWire3.spec
create mode 100644 src/lib/dns/tests/testdata/rdata_afsdb_fromWire4.spec
create mode 100644 src/lib/dns/tests/testdata/rdata_afsdb_fromWire5.spec
create mode 100644 src/lib/dns/tests/testdata/rdata_afsdb_toWire1.spec
create mode 100644 src/lib/dns/tests/testdata/rdata_afsdb_toWire2.spec
create mode 100644 src/lib/dns/tests/testdata/rdata_minfo_fromWire1.spec
create mode 100644 src/lib/dns/tests/testdata/rdata_minfo_fromWire2.spec
create mode 100644 src/lib/dns/tests/testdata/rdata_minfo_fromWire3.spec
create mode 100644 src/lib/dns/tests/testdata/rdata_minfo_fromWire4.spec
create mode 100644 src/lib/dns/tests/testdata/rdata_minfo_fromWire5.spec
create mode 100644 src/lib/dns/tests/testdata/rdata_minfo_fromWire6.spec
create mode 100644 src/lib/dns/tests/testdata/rdata_minfo_toWire1.spec
create mode 100644 src/lib/dns/tests/testdata/rdata_minfo_toWire2.spec
create mode 100644 src/lib/dns/tests/testdata/rdata_minfo_toWireUncompressed1.spec
create mode 100644 src/lib/dns/tests/testdata/rdata_minfo_toWireUncompressed2.spec
create mode 100644 src/lib/python/isc/datasrc/client_inc.cc
create mode 100644 src/lib/python/isc/datasrc/client_python.cc
create mode 100644 src/lib/python/isc/datasrc/client_python.h
create mode 100644 src/lib/python/isc/datasrc/datasrc.cc
create mode 100644 src/lib/python/isc/datasrc/datasrc.h
create mode 100644 src/lib/python/isc/datasrc/finder_inc.cc
create mode 100644 src/lib/python/isc/datasrc/finder_python.cc
create mode 100644 src/lib/python/isc/datasrc/finder_python.h
create mode 100644 src/lib/python/isc/datasrc/iterator_inc.cc
create mode 100644 src/lib/python/isc/datasrc/iterator_python.cc
create mode 100644 src/lib/python/isc/datasrc/iterator_python.h
create mode 100644 src/lib/python/isc/datasrc/tests/datasrc_test.py
create mode 100644 src/lib/python/isc/datasrc/updater_inc.cc
create mode 100644 src/lib/python/isc/datasrc/updater_python.cc
create mode 100644 src/lib/python/isc/datasrc/updater_python.h
create mode 100644 src/lib/python/isc/dns/Makefile.am
create mode 100644 src/lib/python/isc/log_messages/Makefile.am
create mode 100644 src/lib/python/isc/log_messages/README
create mode 100644 src/lib/python/isc/log_messages/__init__.py
create mode 100644 src/lib/python/isc/log_messages/bind10_messages.py
create mode 100644 src/lib/python/isc/log_messages/cfgmgr_messages.py
create mode 100644 src/lib/python/isc/log_messages/cmdctl_messages.py
create mode 100644 src/lib/python/isc/log_messages/config_messages.py
create mode 100755 src/lib/python/isc/log_messages/gen-forwarder.sh
create mode 100644 src/lib/python/isc/log_messages/libxfrin_messages.py
create mode 100644 src/lib/python/isc/log_messages/notify_out_messages.py
create mode 100644 src/lib/python/isc/log_messages/stats_httpd_messages.py
create mode 100644 src/lib/python/isc/log_messages/stats_messages.py
create mode 100644 src/lib/python/isc/log_messages/work/Makefile.am
create mode 100644 src/lib/python/isc/log_messages/work/__init__.py.in
create mode 100644 src/lib/python/isc/log_messages/xfrin_messages.py
create mode 100644 src/lib/python/isc/log_messages/xfrout_messages.py
create mode 100644 src/lib/python/isc/log_messages/zonemgr_messages.py
create mode 100644 src/lib/python/isc/xfrin/Makefile.am
rename src/{bin/stats/tests/http => lib/python/isc/xfrin}/__init__.py (100%)
create mode 100644 src/lib/python/isc/xfrin/diff.py
create mode 100644 src/lib/python/isc/xfrin/libxfrin_messages.mes
create mode 100644 src/lib/python/isc/xfrin/tests/Makefile.am
create mode 100644 src/lib/python/isc/xfrin/tests/diff_tests.py
-----------------------------------------------------------------------
diff --git a/ChangeLog b/ChangeLog
index 83cce33..547192e 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,9 +1,117 @@
+293. [func]* tomek
+ b10-dhcp6: Implemented DHCPv6 echo server. It joins DHCPv6
+ multicast groups and listens to incoming DHCPv6 client messages.
+ Received messages are then echoed back to clients. This
+ functionality is limited, but it can be used to test out client
+ resiliency to unexpected messages. Note that network interface
+ detection routines are not implemented yet, so interface name
+ and its address must be specified in interfaces.txt.
+ (Trac #878, git 3b1a604abf5709bfda7271fa94213f7d823de69d)
+
+292. [func] dvv
+ Implement the DLV rrtype according to RFC4431.
+ (Trac #1144, git d267c0511a07c41cd92e3b0b9ee9bf693743a7cf)
+
+291. [func] naokikambe
+ Statistics items are specified by each module's spec file.
+ Stats module can read these through the config manager. Stats
+ module and stats httpd report statistics data and statistics
+ schema by each module via both bindctl and HTTP/XML.
+ (Trac #928,#929,#930,#1175, git 054699635affd9c9ecbe7a108d880829f3ba229e)
+
+290. [func] jinmei
+ libdns++/pydnspp: added an option parameter to the "from wire"
+ methods of the Message class. One option is defined,
+ PRESERVE_ORDER, which specifies the parser to handle each RR
+ separately, preserving the order, and constructs RRsets in the
+ message sections so that each RRset contains only one RR.
+ (Trac #1258, git c874cb056e2a5e656165f3c160e1b34ccfe8b302)
+
+289. [func]* jinmei
+ b10-xfrout: ACLs for xfrout can now be configured per zone basis.
+ A per zone ACl is part of a more general zone configuration. A
+ quick example for configuring an ACL for zone "example.com" that
+ rejects any transfer request for that zone is as follows:
+ > config add Xfrout/zone_config
+ > config set Xfrout/zone_config[0]/origin "example.com"
+ > config add Xfrout/zone_config[0]/transfer_acl
+ > config set Xfrout/zone_config[0]/transfer_acl[0] {"action": "REJECT"}
+ The previous global ACL (query_acl) was renamed to transfer_acl,
+ which now works as the default ACL. Note: backward compatibility
+ is not provided, so an existing configuration using query_acl
+ needs to be updated by hand.
+ Note: the per zone configuration framework is a temporary
+ workaround. It will eventually be redesigned as a system wide
+ configuration.
+ (Trac #1165, git 698176eccd5d55759fe9448b2c249717c932ac31)
+
+288. [bug] stephen
+ Fixed problem whereby the order in which component files appeared in
+ rdataclass.cc was system dependent, leading to problems on some
+ systems where data types were used before the header file in which
+ they were declared was included.
+ (Trac #1202, git 4a605525cda67bea8c43ca8b3eae6e6749797450)
+
+287. [bug]* jinmei
+ Python script files for log messages (xxx_messages.py) should have
+ been installed under the "isc" package. This fix itself should
+ be a transparent change without affecting existing configurations
+ or other operational practices, but you may want to clean up the
+ python files from the common directory (such as "site-packages").
+ (Trac #1101, git 0eb576518f81c3758c7dbaa2522bd8302b1836b3)
+
+286. [func] ocean
+ libdns++: Implement the HINFO rrtype support according to RFC1034,
+ and RFC1035.
+ (Trac #1112, git 12d62d54d33fbb1572a1aa3089b0d547d02924aa)
+
+285. [bug] jelte
+ sqlite3 data source: fixed a race condition on initial startup,
+ when the database has not been initialized yet, and multiple
+ processes are trying to do so, resulting in one of them failing.
+ (Trac #326, git 5de6f9658f745e05361242042afd518b444d7466)
+
+284. [bug] jerry
+ b10-zonemgr: zonemgr will not terminate on empty zones, it will
+ log a warning and try to do zone transfer for them.
+ (Trac #1153, git 0a39659638fc68f60b95b102968d7d0ad75443ea)
+
+283. [bug] zhanglikun
+ Make stats and boss processes wait for answer messages from each
+ other in block mode to avoid orphan answer messages, add an internal
+ command "getstats" to boss process for getting statistics data from
+ boss.
+ (Trac #519, git 67d8e93028e014f644868fede3570abb28e5fb43)
+
+282. [func] ocean
+ libdns++: Implement the NAPTR rrtype according to RFC2915,
+ RFC2168 and RFC3403.
+ (Trac #1130, git 01d8d0f13289ecdf9996d6d5d26ac0d43e30549c)
+
+bind10-devel-20110819 released on August 19, 2011
+
+281. [func] jelte
+ Added a new type for configuration data: "named set". This allows for
+ similar configuration as the current "list" type, but with strings
+ instead of indices as identifiers. The intended use is for instance
+ /foo/zones/example.org/bar instead of /foo/zones[2]/bar. Currently
+ this new type is not in use yet.
+ (Trac #926, git 06aeefc4787c82db7f5443651f099c5af47bd4d6)
+
+280. [func] jerry
+ libdns++: Implement the MINFO rrtype according to RFC1035.
+ (Trac #1113, git 7a9a19d6431df02d48a7bc9de44f08d9450d3a37)
+
+279. [func] jerry
+ libdns++: Implement the AFSDB rrtype according to RFC1183.
+ (Trac #1114, git ce052cd92cd128ea3db5a8f154bd151956c2920c)
+
278. [doc] jelte
Add logging configuration documentation to the guide.
(Trac #1011, git 2cc500af0929c1f268aeb6f8480bc428af70f4c4)
277. [func] jerry
- Implement the SRV rrtype according to RFC2782.
+ libdns++: Implement the SRV rrtype according to RFC2782.
(Trac #1128, git 5fd94aa027828c50e63ae1073d9d6708e0a9c223)
276. [func] stephen
@@ -29,7 +137,7 @@
returns is str or byte.
(Trac #1021, git 486bf91e0ecc5fbecfe637e1e75ebe373d42509b)
-273. [func] vorner
+273. [func] vorner
It is possible to specify ACL for the xfrout module. It is in the ACL
configuration key and has the usual ACL syntax. It currently supports
only the source address. Default ACL accepts everything.
diff --git a/Makefile.am b/Makefile.am
index b07ef0f..50aa6b9 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -2,12 +2,16 @@ SUBDIRS = doc src tests
USE_LCOV=@USE_LCOV@
LCOV=@LCOV@
GENHTML=@GENHTML@
+DISTCHECK_GTEST_CONFIGURE_FLAG=@DISTCHECK_GTEST_CONFIGURE_FLAG@
DISTCLEANFILES = config.report
# When running distcheck target, do not install the configurations
DISTCHECK_CONFIGURE_FLAGS = --disable-install-configurations
+# Use same --with-gtest flag if set
+DISTCHECK_CONFIGURE_FLAGS += $(DISTCHECK_GTEST_CONFIGURE_FLAG)
+
clean-cpp-coverage:
@if [ $(USE_LCOV) = yes ] ; then \
$(LCOV) --directory . --zerocounters; \
diff --git a/configure.ac b/configure.ac
index 6e129b6..b0f5f45 100644
--- a/configure.ac
+++ b/configure.ac
@@ -12,6 +12,12 @@ AC_PROG_CXX
# Libtool configuration
#
+
+# libtool cannot handle spaces in paths, so exit early if there is one
+if [ test `echo $PWD | grep -c ' '` != "0" ]; then
+ AC_MSG_ERROR([BIND 10 cannot be built in a directory that contains spaces, because of libtool limitations. Please change the directory name, or use a symbolic link that does not contain spaces.])
+fi
+
# On FreeBSD (and probably some others), clang++ does not meet an autoconf
# assumption in identifying libtool configuration regarding shared library:
# the configure script will execute "$CC -shared $CFLAGS/$CXXFLAGS -v" and
@@ -149,6 +155,16 @@ fi
PYTHON_SITEPKG_DIR=${pyexecdir}
AC_SUBST(PYTHON_SITEPKG_DIR)
+# This will be commonly used in various Makefile.am's that need to generate
+# python log messages.
+PYTHON_LOGMSGPKG_DIR="\$(top_builddir)/src/lib/python/isc/log_messages"
+AC_SUBST(PYTHON_LOGMSGPKG_DIR)
+
+# This is python package paths commonly used in python tests. See
+# README of log_messages for why it's included.
+COMMON_PYTHON_PATH="\$(abs_top_builddir)/src/lib/python/isc/log_messages:\$(abs_top_srcdir)/src/lib/python:\$(abs_top_builddir)/src/lib/python"
+AC_SUBST(COMMON_PYTHON_PATH)
+
# Check for python development environments
if test -x ${PYTHON}-config; then
PYTHON_INCLUDES=`${PYTHON}-config --includes`
@@ -421,7 +437,7 @@ AC_ARG_WITH([botan],
AC_HELP_STRING([--with-botan=PATH],
[specify exact directory of Botan library]),
[botan_path="$withval"])
-if test "${botan_path}" == "no" ; then
+if test "${botan_path}" = "no" ; then
AC_MSG_ERROR([Need botan for libcryptolink])
fi
if test "${botan_path}" != "yes" ; then
@@ -494,7 +510,7 @@ AC_ARG_WITH([log4cplus],
AC_HELP_STRING([--with-log4cplus=PATH],
[specify exact directory of log4cplus library and headers]),
[log4cplus_path="$withval"])
-if test "${log4cplus_path}" == "no" ; then
+if test "${log4cplus_path}" = "no" ; then
AC_MSG_ERROR([Need log4cplus])
elif test "${log4cplus_path}" != "yes" ; then
LOG4CPLUS_INCLUDES="-I${log4cplus_path}/include"
@@ -634,6 +650,7 @@ fi
#
if test "$gtest_path" != "no"
then
+ DISTCHECK_GTEST_CONFIGURE_FLAG="--with-gtest=\"$gtest_path\""
if test "$gtest_path" != "yes"; then
GTEST_PATHS=$gtest_path
if test -x "${gtest_path}/bin/gtest-config" ; then
@@ -674,8 +691,10 @@ else
GTEST_INCLUDES=
GTEST_LDFLAGS=
GTEST_LDADD=
+ DISTCHECK_GTEST_CONFIGURE_FLAG=
fi
AM_CONDITIONAL(HAVE_GTEST, test $gtest_path != "no")
+AC_SUBST(DISTCHECK_GTEST_CONFIGURE_FLAG)
AC_SUBST(GTEST_INCLUDES)
AC_SUBST(GTEST_LDFLAGS)
AC_SUBST(GTEST_LDADD)
@@ -795,19 +814,13 @@ AC_CONFIG_FILES([Makefile
src/bin/sockcreator/tests/Makefile
src/bin/xfrin/Makefile
src/bin/xfrin/tests/Makefile
+ src/bin/xfrin/tests/testdata/Makefile
src/bin/xfrout/Makefile
src/bin/xfrout/tests/Makefile
src/bin/zonemgr/Makefile
src/bin/zonemgr/tests/Makefile
src/bin/stats/Makefile
src/bin/stats/tests/Makefile
- src/bin/stats/tests/isc/Makefile
- src/bin/stats/tests/isc/cc/Makefile
- src/bin/stats/tests/isc/config/Makefile
- src/bin/stats/tests/isc/util/Makefile
- src/bin/stats/tests/isc/log/Makefile
- src/bin/stats/tests/testdata/Makefile
- src/bin/stats/tests/http/Makefile
src/bin/usermgr/Makefile
src/bin/tests/Makefile
src/lib/Makefile
@@ -828,12 +841,15 @@ AC_CONFIG_FILES([Makefile
src/lib/python/isc/util/tests/Makefile
src/lib/python/isc/datasrc/Makefile
src/lib/python/isc/datasrc/tests/Makefile
+ src/lib/python/isc/dns/Makefile
src/lib/python/isc/cc/Makefile
src/lib/python/isc/cc/tests/Makefile
src/lib/python/isc/config/Makefile
src/lib/python/isc/config/tests/Makefile
src/lib/python/isc/log/Makefile
src/lib/python/isc/log/tests/Makefile
+ src/lib/python/isc/log_messages/Makefile
+ src/lib/python/isc/log_messages/work/Makefile
src/lib/python/isc/net/Makefile
src/lib/python/isc/net/tests/Makefile
src/lib/python/isc/notify/Makefile
@@ -841,6 +857,8 @@ AC_CONFIG_FILES([Makefile
src/lib/python/isc/testutils/Makefile
src/lib/python/isc/bind10/Makefile
src/lib/python/isc/bind10/tests/Makefile
+ src/lib/python/isc/xfrin/Makefile
+ src/lib/python/isc/xfrin/tests/Makefile
src/lib/config/Makefile
src/lib/config/tests/Makefile
src/lib/config/tests/testdata/Makefile
@@ -856,6 +874,7 @@ AC_CONFIG_FILES([Makefile
src/lib/exceptions/tests/Makefile
src/lib/datasrc/Makefile
src/lib/datasrc/tests/Makefile
+ src/lib/datasrc/tests/testdata/Makefile
src/lib/xfr/Makefile
src/lib/log/Makefile
src/lib/log/compiler/Makefile
@@ -931,6 +950,7 @@ AC_OUTPUT([doc/version.ent
src/lib/python/isc/cc/tests/cc_test
src/lib/python/isc/notify/tests/notify_out_test
src/lib/python/isc/log/tests/log_console.py
+ src/lib/python/isc/log_messages/work/__init__.py
src/lib/dns/gen-rdatacode.py
src/lib/python/bind10_config.py
src/lib/cc/session_config.h.pre
diff --git a/doc/Doxyfile b/doc/Doxyfile
index ceb806f..8be9098 100644
--- a/doc/Doxyfile
+++ b/doc/Doxyfile
@@ -568,13 +568,13 @@ WARN_LOGFILE =
# directories like "/usr/src/myproject". Separate the files or directories
# with spaces.
-INPUT = ../src/lib/cc ../src/lib/config \
- ../src/lib/cryptolink ../src/lib/dns ../src/lib/datasrc \
+INPUT = ../src/lib/exceptions ../src/lib/cc \
+ ../src/lib/config ../src/lib/cryptolink ../src/lib/dns ../src/lib/datasrc \
../src/bin/auth ../src/bin/resolver ../src/lib/bench ../src/lib/log \
../src/lib/log/compiler ../src/lib/asiolink/ ../src/lib/nsas \
../src/lib/testutils ../src/lib/cache ../src/lib/server_common/ \
../src/bin/sockcreator/ ../src/lib/util/ \
- ../src/lib/resolve ../src/lib/acl
+ ../src/lib/resolve ../src/lib/acl ../src/bin/dhcp6
# This tag can be used to specify the character encoding of the source files
# that doxygen parses. Internally doxygen uses the UTF-8 encoding, which is
diff --git a/doc/guide/bind10-guide.html b/doc/guide/bind10-guide.html
index 5754cf0..1070a2e 100644
--- a/doc/guide/bind10-guide.html
+++ b/doc/guide/bind10-guide.html
@@ -1,24 +1,24 @@
-<html><head><meta http-equiv="Content-Type" content="text/html; charset=ISO-8859-1"><title>BIND 10 Guide</title><link rel="stylesheet" href="./bind10-guide.css" type="text/css"><meta name="generator" content="DocBook XSL Stylesheets V1.75.2"><meta name="description" content="BIND 10 is a Domain Name System (DNS) suite managed by Internet Systems Consortium (ISC). It includes DNS libraries and modular components for controlling authoritative and recursive DNS servers. This is the reference guide for BIND 10 version 20110519. The most up-to-date version of this document, along with other documents for BIND 10, can be found at ."></head><body bgcolor="white" text="black" link="#0000FF" vlink="#840084" alink="#0000FF"><div class="book" title="BIND 10 Guide"><div class="titlepage"><div><div><h1 class="title"><a name="id1168230298903"></a>BIND 10 Guide</h1></div><div><h2 class="subtitle">Administrator Reference for BIND 10</h2></div><div><p class="releaseinfo">This is the referenc
e guide for BIND 10 version
- 20110519.</p></div><div><p class="copyright">Copyright © 2010 Internet Systems Consortium, Inc.</p></div><div><div class="abstract" title="Abstract"><p class="title"><b>Abstract</b></p><p>BIND 10 is a Domain Name System (DNS) suite managed by
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=ISO-8859-1"><title>BIND 10 Guide</title><link rel="stylesheet" href="./bind10-guide.css" type="text/css"><meta name="generator" content="DocBook XSL Stylesheets V1.75.2"><meta name="description" content="BIND 10 is a Domain Name System (DNS) suite managed by Internet Systems Consortium (ISC). It includes DNS libraries and modular components for controlling authoritative and recursive DNS servers. This is the reference guide for BIND 10 version 20110809. The most up-to-date version of this document, along with other documents for BIND 10, can be found at ."></head><body bgcolor="white" text="black" link="#0000FF" vlink="#840084" alink="#0000FF"><div class="book" title="BIND 10 Guide"><div class="titlepage"><div><div><h1 class="title"><a name="id1168229460045"></a>BIND 10 Guide</h1></div><div><h2 class="subtitle">Administrator Reference for BIND 10</h2></div><div><p class="releaseinfo">This is the referenc
e guide for BIND 10 version
+ 20110809.</p></div><div><p class="copyright">Copyright © 2010-2011 Internet Systems Consortium, Inc.</p></div><div><div class="abstract" title="Abstract"><p class="title"><b>Abstract</b></p><p>BIND 10 is a Domain Name System (DNS) suite managed by
Internet Systems Consortium (ISC). It includes DNS libraries
and modular components for controlling authoritative and
recursive DNS servers.
</p><p>
- This is the reference guide for BIND 10 version 20110519.
+ This is the reference guide for BIND 10 version 20110809.
The most up-to-date version of this document, along with
- other documents for BIND 10, can be found at <a class="ulink" href="http://bind10.isc.org/docs" target="_top">http://bind10.isc.org/docs</a>. </p></div></div></div><hr></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="chapter"><a href="#intro">1. Introduction</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168230299038">Supported Platforms</a></span></dt><dt><span class="section"><a href="#id1168230299065">Required Software</a></span></dt><dt><span class="section"><a href="#starting_stopping">Starting and Stopping the Server</a></span></dt><dt><span class="section"><a href="#managing_once_running">Managing BIND 10</a></span></dt></dl></dd><dt><span class="chapter"><a href="#installation">2. Installation</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168230284846">Building Requirements</a></span></dt><dt><span class="section"><a href="#quickstart">Quick start</a></span></dt><dt><span class="section"><a href="#install">In
stallation from source</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168230285026">Download Tar File</a></span></dt><dt><span class="section"><a href="#id1168230285045">Retrieve from Git</a></span></dt><dt><span class="section"><a href="#id1168230285106">Configure before the build</a></span></dt><dt><span class="section"><a href="#id1168230285203">Build</a></span></dt><dt><span class="section"><a href="#id1168230285219">Install</a></span></dt><dt><span class="section"><a href="#id1168230285242">Install Hierarchy</a></span></dt></dl></dd></dl></dd><dt><span class="chapter"><a href="#bind10">3. Starting BIND10 with <span class="command"><strong>bind10</strong></span></a></span></dt><dd><dl><dt><span class="section"><a href="#start">Starting BIND 10</a></span></dt></dl></dd><dt><span class="chapter"><a href="#msgq">4. Command channel</a></span></dt><dt><span class="chapter"><a href="#cfgmgr">5. Configuration manager</a></span></dt><dt><span class="chapter"><a hr
ef="#cmdctl">6. Remote control daemon</a></span></dt><dd><dl><dt><span class="section"><a href="#cmdctl.spec">Configuration specification for b10-cmdctl</a></span></dt></dl></dd><dt><span class="chapter"><a href="#bindctl">7. Control and configure user interface</a></span></dt><dt><span class="chapter"><a href="#authserver">8. Authoritative Server</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168230285816">Server Configurations</a></span></dt><dt><span class="section"><a href="#id1168230285881">Data Source Backends</a></span></dt><dt><span class="section"><a href="#id1168230285912">Loading Master Zones Files</a></span></dt></dl></dd><dt><span class="chapter"><a href="#xfrin">9. Incoming Zone Transfers</a></span></dt><dt><span class="chapter"><a href="#xfrout">10. Outbound Zone Transfers</a></span></dt><dt><span class="chapter"><a href="#zonemgr">11. Secondary Manager</a></span></dt><dt><span class="chapter"><a href="#resolverserver">12. Recursive Name Server<
/a></span></dt><dd><dl><dt><span class="section"><a href="#id1168230286300">Forwarding</a></span></dt></dl></dd><dt><span class="chapter"><a href="#statistics">13. Statistics</a></span></dt><dt><span class="chapter"><a href="#logging">14. Logging</a></span></dt></dl></div><div class="chapter" title="Chapter 1. Introduction"><div class="titlepage"><div><div><h2 class="title"><a name="intro"></a>Chapter 1. Introduction</h2></div></div></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="section"><a href="#id1168230299038">Supported Platforms</a></span></dt><dt><span class="section"><a href="#id1168230299065">Required Software</a></span></dt><dt><span class="section"><a href="#starting_stopping">Starting and Stopping the Server</a></span></dt><dt><span class="section"><a href="#managing_once_running">Managing BIND 10</a></span></dt></dl></div><p>
+ other documents for BIND 10, can be found at <a class="ulink" href="http://bind10.isc.org/docs" target="_top">http://bind10.isc.org/docs</a>. </p></div></div></div><hr></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="chapter"><a href="#intro">1. Introduction</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168229460181">Supported Platforms</a></span></dt><dt><span class="section"><a href="#id1168229460208">Required Software</a></span></dt><dt><span class="section"><a href="#starting_stopping">Starting and Stopping the Server</a></span></dt><dt><span class="section"><a href="#managing_once_running">Managing BIND 10</a></span></dt></dl></dd><dt><span class="chapter"><a href="#installation">2. Installation</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168229445988">Building Requirements</a></span></dt><dt><span class="section"><a href="#quickstart">Quick start</a></span></dt><dt><span class="section"><a href="#install">In
stallation from source</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168229446178">Download Tar File</a></span></dt><dt><span class="section"><a href="#id1168229446197">Retrieve from Git</a></span></dt><dt><span class="section"><a href="#id1168229446258">Configure before the build</a></span></dt><dt><span class="section"><a href="#id1168229446356">Build</a></span></dt><dt><span class="section"><a href="#id1168229446371">Install</a></span></dt><dt><span class="section"><a href="#id1168229446394">Install Hierarchy</a></span></dt></dl></dd></dl></dd><dt><span class="chapter"><a href="#bind10">3. Starting BIND10 with <span class="command"><strong>bind10</strong></span></a></span></dt><dd><dl><dt><span class="section"><a href="#start">Starting BIND 10</a></span></dt></dl></dd><dt><span class="chapter"><a href="#msgq">4. Command channel</a></span></dt><dt><span class="chapter"><a href="#cfgmgr">5. Configuration manager</a></span></dt><dt><span class="chapter"><a hr
ef="#cmdctl">6. Remote control daemon</a></span></dt><dd><dl><dt><span class="section"><a href="#cmdctl.spec">Configuration specification for b10-cmdctl</a></span></dt></dl></dd><dt><span class="chapter"><a href="#bindctl">7. Control and configure user interface</a></span></dt><dt><span class="chapter"><a href="#authserver">8. Authoritative Server</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168229446979">Server Configurations</a></span></dt><dt><span class="section"><a href="#id1168229447044">Data Source Backends</a></span></dt><dt><span class="section"><a href="#id1168229447074">Loading Master Zones Files</a></span></dt></dl></dd><dt><span class="chapter"><a href="#xfrin">9. Incoming Zone Transfers</a></span></dt><dt><span class="chapter"><a href="#xfrout">10. Outbound Zone Transfers</a></span></dt><dt><span class="chapter"><a href="#zonemgr">11. Secondary Manager</a></span></dt><dt><span class="chapter"><a href="#resolverserver">12. Recursive Name Server<
/a></span></dt><dd><dl><dt><span class="section"><a href="#id1168229447556">Access Control</a></span></dt><dt><span class="section"><a href="#id1168229447671">Forwarding</a></span></dt></dl></dd><dt><span class="chapter"><a href="#statistics">13. Statistics</a></span></dt><dt><span class="chapter"><a href="#logging">14. Logging</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168229447788">Logging configuration</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168229447799">Loggers</a></span></dt><dt><span class="section"><a href="#id1168229448040">Output Options</a></span></dt><dt><span class="section"><a href="#id1168229448215">Example session</a></span></dt></dl></dd><dt><span class="section"><a href="#id1168229448428">Logging Message Format</a></span></dt></dl></dd></dl></div><div class="chapter" title="Chapter 1. Introduction"><div class="titlepage"><div><div><h2 class="title"><a name="intro"></a>Chapter 1. Introduction</h2></div></div></div><di
v class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="section"><a href="#id1168229460181">Supported Platforms</a></span></dt><dt><span class="section"><a href="#id1168229460208">Required Software</a></span></dt><dt><span class="section"><a href="#starting_stopping">Starting and Stopping the Server</a></span></dt><dt><span class="section"><a href="#managing_once_running">Managing BIND 10</a></span></dt></dl></div><p>
BIND is the popular implementation of a DNS server, developer
interfaces, and DNS tools.
BIND 10 is a rewrite of BIND 9. BIND 10 is written in C++ and Python
and provides a modular environment for serving and maintaining DNS.
</p><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>
This guide covers the experimental prototype of
- BIND 10 version 20110519.
+ BIND 10 version 20110809.
</p></div><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>
BIND 10 provides a EDNS0- and DNSSEC-capable
authoritative DNS server and a caching recursive name server
which also provides forwarding.
- </p></div><div class="section" title="Supported Platforms"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168230299038"></a>Supported Platforms</h2></div></div></div><p>
+ </p></div><div class="section" title="Supported Platforms"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229460181"></a>Supported Platforms</h2></div></div></div><p>
BIND 10 builds have been tested on Debian GNU/Linux 5,
Ubuntu 9.10, NetBSD 5, Solaris 10, FreeBSD 7 and 8, and CentOS
Linux 5.3.
@@ -28,13 +28,15 @@
It is planned for BIND 10 to build, install and run on
Windows and standard Unix-type platforms.
- </p></div><div class="section" title="Required Software"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168230299065"></a>Required Software</h2></div></div></div><p>
+ </p></div><div class="section" title="Required Software"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229460208"></a>Required Software</h2></div></div></div><p>
BIND 10 requires Python 3.1. Later versions may work, but Python
3.1 is the minimum version which will work.
</p><p>
BIND 10 uses the Botan crypto library for C++. It requires
- at least Botan version 1.8. To build BIND 10, install the
- Botan libraries and development include headers.
+ at least Botan version 1.8.
+ </p><p>
+ BIND 10 uses the log4cplus C++ logging library. It requires
+ at least log4cplus version 1.0.3.
</p><p>
The authoritative server requires SQLite 3.3.9 or newer.
The <span class="command"><strong>b10-xfrin</strong></span>, <span class="command"><strong>b10-xfrout</strong></span>,
@@ -136,7 +138,10 @@
and, of course, DNS. These include detailed developer
documentation and code examples.
- </p></div><div class="chapter" title="Chapter 2. Installation"><div class="titlepage"><div><div><h2 class="title"><a name="installation"></a>Chapter 2. Installation</h2></div></div></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="section"><a href="#id1168230284846">Building Requirements</a></span></dt><dt><span class="section"><a href="#quickstart">Quick start</a></span></dt><dt><span class="section"><a href="#install">Installation from source</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168230285026">Download Tar File</a></span></dt><dt><span class="section"><a href="#id1168230285045">Retrieve from Git</a></span></dt><dt><span class="section"><a href="#id1168230285106">Configure before the build</a></span></dt><dt><span class="section"><a href="#id1168230285203">Build</a></span></dt><dt><span class="section"><a href="#id1168230285219">Install</a></span></dt><dt><span class="section"><a href="#id1168230285242">Install Hierarchy<
/a></span></dt></dl></dd></dl></div><div class="section" title="Building Requirements"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168230284846"></a>Building Requirements</h2></div></div></div><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>
+ </p></div><div class="chapter" title="Chapter 2. Installation"><div class="titlepage"><div><div><h2 class="title"><a name="installation"></a>Chapter 2. Installation</h2></div></div></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="section"><a href="#id1168229445988">Building Requirements</a></span></dt><dt><span class="section"><a href="#quickstart">Quick start</a></span></dt><dt><span class="section"><a href="#install">Installation from source</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168229446178">Download Tar File</a></span></dt><dt><span class="section"><a href="#id1168229446197">Retrieve from Git</a></span></dt><dt><span class="section"><a href="#id1168229446258">Configure before the build</a></span></dt><dt><span class="section"><a href="#id1168229446356">Build</a></span></dt><dt><span class="section"><a href="#id1168229446371">Install</a></span></dt><dt><span class="section"><a href="#id1168229446394">Install Hierarchy<
/a></span></dt></dl></dd></dl></div><div class="section" title="Building Requirements"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229445988"></a>Building Requirements</h2></div></div></div><p>
+ In addition to the run-time requirements, building BIND 10
+ from source code requires various development include headers.
+ </p><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>
Some operating systems have split their distribution packages into
a run-time and a development package. You will need to install
the development package versions, which include header files and
@@ -147,6 +152,11 @@
</p><p>
+ To build BIND 10, also install the Botan (at least version
+ 1.8) and the log4cplus (at least version 1.0.3)
+ development include headers.
+ </p><p>
+
The Python Library and Python _sqlite3 module are required to
enable the Xfrout and Xfrin support.
</p><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>
@@ -156,7 +166,7 @@
Building BIND 10 also requires a C++ compiler and
standard development headers, make, and pkg-config.
BIND 10 builds have been tested with GCC g++ 3.4.3, 4.1.2,
- 4.1.3, 4.2.1, 4.3.2, and 4.4.1.
+ 4.1.3, 4.2.1, 4.3.2, and 4.4.1; Clang++ 2.8; and Sun C++ 5.10.
</p></div><div class="section" title="Quick start"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="quickstart"></a>Quick start</h2></div></div></div><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>
This quickly covers the standard steps for installing
and deploying BIND 10 as an authoritative name server using
@@ -192,14 +202,14 @@
the Git code revision control system or as a downloadable
tar file. It may also be available in pre-compiled ready-to-use
packages from operating system vendors.
- </p><div class="section" title="Download Tar File"><div class="titlepage"><div><div><h3 class="title"><a name="id1168230285026"></a>Download Tar File</h3></div></div></div><p>
+ </p><div class="section" title="Download Tar File"><div class="titlepage"><div><div><h3 class="title"><a name="id1168229446178"></a>Download Tar File</h3></div></div></div><p>
Downloading a release tar file is the recommended method to
obtain the source code.
</p><p>
The BIND 10 releases are available as tar file downloads from
<a class="ulink" href="ftp://ftp.isc.org/isc/bind10/" target="_top">ftp://ftp.isc.org/isc/bind10/</a>.
Periodic development snapshots may also be available.
- </p></div><div class="section" title="Retrieve from Git"><div class="titlepage"><div><div><h3 class="title"><a name="id1168230285045"></a>Retrieve from Git</h3></div></div></div><p>
+ </p></div><div class="section" title="Retrieve from Git"><div class="titlepage"><div><div><h3 class="title"><a name="id1168229446197"></a>Retrieve from Git</h3></div></div></div><p>
Downloading this "bleeding edge" code is recommended only for
developers or advanced users. Using development code in a production
environment is not recommended.
@@ -233,7 +243,7 @@
<span class="command"><strong>autoheader</strong></span>,
<span class="command"><strong>automake</strong></span>,
and related commands.
- </p></div><div class="section" title="Configure before the build"><div class="titlepage"><div><div><h3 class="title"><a name="id1168230285106"></a>Configure before the build</h3></div></div></div><p>
+ </p></div><div class="section" title="Configure before the build"><div class="titlepage"><div><div><h3 class="title"><a name="id1168229446258"></a>Configure before the build</h3></div></div></div><p>
BIND 10 uses the GNU Build System to discover build environment
details.
To generate the makefiles using the defaults, simply run:
@@ -242,7 +252,7 @@
Run <span class="command"><strong>./configure</strong></span> with the <code class="option">--help</code>
switch to view the different options. The commonly-used options are:
- </p><div class="variablelist"><dl><dt><span class="term">--prefix</span></dt><dd>Define the the installation location (the
+ </p><div class="variablelist"><dl><dt><span class="term">--prefix</span></dt><dd>Define the installation location (the
default is <code class="filename">/usr/local/</code>).
</dd><dt><span class="term">--with-boost-include</span></dt><dd>Define the path to find the Boost headers.
</dd><dt><span class="term">--with-pythonpath</span></dt><dd>Define the path to Python 3.1 if it is not in the
@@ -264,16 +274,16 @@
</p><p>
If the configure fails, it may be due to missing or old
dependencies.
- </p></div><div class="section" title="Build"><div class="titlepage"><div><div><h3 class="title"><a name="id1168230285203"></a>Build</h3></div></div></div><p>
+ </p></div><div class="section" title="Build"><div class="titlepage"><div><div><h3 class="title"><a name="id1168229446356"></a>Build</h3></div></div></div><p>
After the configure step is complete, to build the executables
from the C++ code and prepare the Python scripts, run:
</p><pre class="screen">$ <strong class="userinput"><code>make</code></strong></pre><p>
- </p></div><div class="section" title="Install"><div class="titlepage"><div><div><h3 class="title"><a name="id1168230285219"></a>Install</h3></div></div></div><p>
+ </p></div><div class="section" title="Install"><div class="titlepage"><div><div><h3 class="title"><a name="id1168229446371"></a>Install</h3></div></div></div><p>
To install the BIND 10 executables, support files,
and documentation, run:
</p><pre class="screen">$ <strong class="userinput"><code>make install</code></strong></pre><p>
- </p><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>The install step may require superuser privileges.</p></div></div><div class="section" title="Install Hierarchy"><div class="titlepage"><div><div><h3 class="title"><a name="id1168230285242"></a>Install Hierarchy</h3></div></div></div><p>
+ </p><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>The install step may require superuser privileges.</p></div></div><div class="section" title="Install Hierarchy"><div class="titlepage"><div><div><h3 class="title"><a name="id1168229446394"></a>Install Hierarchy</h3></div></div></div><p>
The following is the layout of the complete BIND 10 installation:
</p><div class="itemizedlist"><ul class="itemizedlist" type="disc"><li class="listitem">
<code class="filename">bin/</code> —
@@ -304,14 +314,14 @@
data source and configuration databases.
</li></ul></div><p>
</p></div></div></div><div class="chapter" title="Chapter 3. Starting BIND10 with bind10"><div class="titlepage"><div><div><h2 class="title"><a name="bind10"></a>Chapter 3. Starting BIND10 with <span class="command"><strong>bind10</strong></span></h2></div></div></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="section"><a href="#start">Starting BIND 10</a></span></dt></dl></div><p>
- BIND 10 provides the <span class="command"><strong>bind10</strong></span> command which
+ BIND 10 provides the <span class="command"><strong>bind10</strong></span> command which
starts up the required processes.
<span class="command"><strong>bind10</strong></span>
will also restart processes that exit unexpectedly.
This is the only command needed to start the BIND 10 system.
</p><p>
After starting the <span class="command"><strong>b10-msgq</strong></span> communications channel,
- <span class="command"><strong>bind10</strong></span> connects to it,
+ <span class="command"><strong>bind10</strong></span> connects to it,
runs the configuration manager, and reads its own configuration.
Then it starts the other modules.
</p><p>
@@ -334,7 +344,12 @@
To start the BIND 10 service, simply run <span class="command"><strong>bind10</strong></span>.
Run it with the <code class="option">--verbose</code> switch to
get additional debugging or diagnostic output.
- </p></div></div><div class="chapter" title="Chapter 4. Command channel"><div class="titlepage"><div><div><h2 class="title"><a name="msgq"></a>Chapter 4. Command channel</h2></div></div></div><p>
+ </p><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>
+ If the setproctitle Python module is detected at start up,
+ the process names for the Python-based daemons will be renamed
+ to better identify them instead of just <span class="quote">“<span class="quote">python</span>”</span>.
+ This is not needed on some operating systems.
+ </p></div></div></div><div class="chapter" title="Chapter 4. Command channel"><div class="titlepage"><div><div><h2 class="title"><a name="msgq"></a>Chapter 4. Command channel</h2></div></div></div><p>
The BIND 10 components use the <span class="command"><strong>b10-msgq</strong></span>
message routing daemon to communicate with other BIND 10 components.
The <span class="command"><strong>b10-msgq</strong></span> implements what is called the
@@ -490,12 +505,12 @@ shutdown
the details and relays (over a <span class="command"><strong>b10-msgq</strong></span> command
channel) the configuration on to the specified module.
</p><p>
- </p></div><div class="chapter" title="Chapter 8. Authoritative Server"><div class="titlepage"><div><div><h2 class="title"><a name="authserver"></a>Chapter 8. Authoritative Server</h2></div></div></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="section"><a href="#id1168230285816">Server Configurations</a></span></dt><dt><span class="section"><a href="#id1168230285881">Data Source Backends</a></span></dt><dt><span class="section"><a href="#id1168230285912">Loading Master Zones Files</a></span></dt></dl></div><p>
+ </p></div><div class="chapter" title="Chapter 8. Authoritative Server"><div class="titlepage"><div><div><h2 class="title"><a name="authserver"></a>Chapter 8. Authoritative Server</h2></div></div></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="section"><a href="#id1168229446979">Server Configurations</a></span></dt><dt><span class="section"><a href="#id1168229447044">Data Source Backends</a></span></dt><dt><span class="section"><a href="#id1168229447074">Loading Master Zones Files</a></span></dt></dl></div><p>
The <span class="command"><strong>b10-auth</strong></span> is the authoritative DNS server.
It supports EDNS0 and DNSSEC. It supports IPv6.
Normally it is started by the <span class="command"><strong>bind10</strong></span> master
process.
- </p><div class="section" title="Server Configurations"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168230285816"></a>Server Configurations</h2></div></div></div><p>
+ </p><div class="section" title="Server Configurations"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229446979"></a>Server Configurations</h2></div></div></div><p>
<span class="command"><strong>b10-auth</strong></span> is configured via the
<span class="command"><strong>b10-cfgmgr</strong></span> configuration manager.
The module name is <span class="quote">“<span class="quote">Auth</span>”</span>.
@@ -515,7 +530,7 @@ This may be a temporary setting until then.
</p><div class="variablelist"><dl><dt><span class="term">shutdown</span></dt><dd>Stop the authoritative DNS server.
</dd></dl></div><p>
- </p></div><div class="section" title="Data Source Backends"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168230285881"></a>Data Source Backends</h2></div></div></div><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>
+ </p></div><div class="section" title="Data Source Backends"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229447044"></a>Data Source Backends</h2></div></div></div><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>
For the development prototype release, <span class="command"><strong>b10-auth</strong></span>
supports a SQLite3 data source backend and in-memory data source
backend.
@@ -529,7 +544,7 @@ This may be a temporary setting until then.
The default is <code class="filename">/usr/local/var/</code>.)
This data file location may be changed by defining the
<span class="quote">“<span class="quote">database_file</span>”</span> configuration.
- </p></div><div class="section" title="Loading Master Zones Files"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168230285912"></a>Loading Master Zones Files</h2></div></div></div><p>
+ </p></div><div class="section" title="Loading Master Zones Files"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229447074"></a>Loading Master Zones Files</h2></div></div></div><p>
      RFC 1035 style DNS master zone files may be imported
into a BIND 10 data source by using the
<span class="command"><strong>b10-loadzone</strong></span> utility.
@@ -569,7 +584,7 @@ This may be a temporary setting until then.
provide <span class="quote">“<span class="quote">secondary</span>”</span> service.
</p><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>
The current development release of BIND 10 only supports
- AXFR. (IXFR is not supported.)
+ AXFR. (IXFR is not supported.)
@@ -591,7 +606,7 @@ This may be a temporary setting until then.
NOTIFY messages to slaves.
</p><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>
The current development release of BIND 10 only supports
- AXFR. (IXFR is not supported.)
+ AXFR. (IXFR is not supported.)
Access control is not yet provided.
</p></div></div><div class="chapter" title="Chapter 11. Secondary Manager"><div class="titlepage"><div><div><h2 class="title"><a name="zonemgr"></a>Chapter 11. Secondary Manager</h2></div></div></div><p>
The <span class="command"><strong>b10-zonemgr</strong></span> process is started by
@@ -607,13 +622,13 @@ This may be a temporary setting until then.
</p><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>
Access control (such as allowing notifies) is not yet provided.
The primary/secondary service is not yet complete.
- </p></div></div><div class="chapter" title="Chapter 12. Recursive Name Server"><div class="titlepage"><div><div><h2 class="title"><a name="resolverserver"></a>Chapter 12. Recursive Name Server</h2></div></div></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="section"><a href="#id1168230286300">Forwarding</a></span></dt></dl></div><p>
+ </p></div></div><div class="chapter" title="Chapter 12. Recursive Name Server"><div class="titlepage"><div><div><h2 class="title"><a name="resolverserver"></a>Chapter 12. Recursive Name Server</h2></div></div></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="section"><a href="#id1168229447556">Access Control</a></span></dt><dt><span class="section"><a href="#id1168229447671">Forwarding</a></span></dt></dl></div><p>
The <span class="command"><strong>b10-resolver</strong></span> process is started by
<span class="command"><strong>bind10</strong></span>.
</p><p>
The main <span class="command"><strong>bind10</strong></span> process can be configured
- to select to run either the authoritative or resolver.
+ to select to run either the authoritative or resolver or both.
By default, it starts the authoritative service.
@@ -629,14 +644,52 @@ This may be a temporary setting until then.
The master <span class="command"><strong>bind10</strong></span> will stop and start
the desired services.
</p><p>
- The resolver also needs to be configured to listen on an address
- and port:
+ By default, the resolver listens on port 53 for 127.0.0.1 and ::1.
+ The following example shows how it can be configured to
+ listen on an additional address (and port):
</p><pre class="screen">
-> <strong class="userinput"><code>config set Resolver/listen_on [{ "address": "127.0.0.1", "port": 53 }]</code></strong>
+> <strong class="userinput"><code>config add Resolver/listen_on</code></strong>
+> <strong class="userinput"><code>config set Resolver/listen_on[<em class="replaceable"><code>2</code></em>]/address "192.168.1.1"</code></strong>
+> <strong class="userinput"><code>config set Resolver/listen_on[<em class="replaceable"><code>2</code></em>]/port 53</code></strong>
> <strong class="userinput"><code>config commit</code></strong>
</pre><p>
- </p><div class="section" title="Forwarding"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168230286300"></a>Forwarding</h2></div></div></div><p>
+ </p><p>(Replace the <span class="quote">“<span class="quote"><em class="replaceable"><code>2</code></em></span>”</span>
+ as needed; run <span class="quote">“<span class="quote"><strong class="userinput"><code>config show
+ Resolver/listen_on</code></strong></span>”</span> if needed.)</p><div class="section" title="Access Control"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229447556"></a>Access Control</h2></div></div></div><p>
+ By default, the <span class="command"><strong>b10-resolver</strong></span> daemon only accepts
+ DNS queries from the localhost (127.0.0.1 and ::1).
+ The <code class="option">Resolver/query_acl</code> configuration may
+ be used to reject, drop, or allow specific IPs or networks.
+ This configuration list is first match.
+ </p><p>
+ The configuration's <code class="option">action</code> item may be
+ set to <span class="quote">“<span class="quote">ACCEPT</span>”</span> to allow the incoming query,
+ <span class="quote">“<span class="quote">REJECT</span>”</span> to respond with a DNS REFUSED return
+ code, or <span class="quote">“<span class="quote">DROP</span>”</span> to ignore the query without
+ any response (such as a blackhole). For more information,
+ see the respective debugging messages: <a class="ulink" href="bind10-messages.html#RESOLVER_QUERY_ACCEPTED" target="_top">RESOLVER_QUERY_ACCEPTED</a>,
+ <a class="ulink" href="bind10-messages.html#RESOLVER_QUERY_REJECTED" target="_top">RESOLVER_QUERY_REJECTED</a>,
+ and <a class="ulink" href="bind10-messages.html#RESOLVER_QUERY_DROPPED" target="_top">RESOLVER_QUERY_DROPPED</a>.
+ </p><p>
+ The required configuration's <code class="option">from</code> item is set
+        to an IPv4 or IPv6 address, addresses with a network mask, or to
+ the special lowercase keywords <span class="quote">“<span class="quote">any6</span>”</span> (for
+ any IPv6 address) or <span class="quote">“<span class="quote">any4</span>”</span> (for any IPv4
+ address).
+ </p><p>
+ For example to allow the <em class="replaceable"><code>192.168.1.0/24</code></em>
+ network to use your recursive name server, at the
+ <span class="command"><strong>bindctl</strong></span> prompt run:
+ </p><pre class="screen">
+> <strong class="userinput"><code>config add Resolver/query_acl</code></strong>
+> <strong class="userinput"><code>config set Resolver/query_acl[<em class="replaceable"><code>2</code></em>]/action "ACCEPT"</code></strong>
+> <strong class="userinput"><code>config set Resolver/query_acl[<em class="replaceable"><code>2</code></em>]/from "<em class="replaceable"><code>192.168.1.0/24</code></em>"</code></strong>
+> <strong class="userinput"><code>config commit</code></strong>
+</pre><p>(Replace the <span class="quote">“<span class="quote"><em class="replaceable"><code>2</code></em></span>”</span>
+ as needed; run <span class="quote">“<span class="quote"><strong class="userinput"><code>config show
+ Resolver/query_acl</code></strong></span>”</span> if needed.)</p><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>This prototype access control configuration
+ syntax may be changed.</p></div></div><div class="section" title="Forwarding"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229447671"></a>Forwarding</h2></div></div></div><p>
To enable forwarding, the upstream address and port must be
configured to forward queries to, such as:
@@ -664,68 +717,440 @@ This may be a temporary setting until then.
</p><p>
- This stats daemon provides commands to identify if it is running,
- show specified or all statistics data, set values, remove data,
- and reset data.
+ This stats daemon provides commands to identify if it is
+ running, show specified or all statistics data, show specified
+ or all statistics data schema, and set specified statistics
+ data.
For example, using <span class="command"><strong>bindctl</strong></span>:
</p><pre class="screen">
> <strong class="userinput"><code>Stats show</code></strong>
{
- "auth.queries.tcp": 1749,
- "auth.queries.udp": 867868,
- "bind10.boot_time": "2011-01-20T16:59:03Z",
- "report_time": "2011-01-20T17:04:06Z",
- "stats.boot_time": "2011-01-20T16:59:05Z",
- "stats.last_update_time": "2011-01-20T17:04:05Z",
- "stats.lname": "4d3869d9_a at jreed.example.net",
- "stats.start_time": "2011-01-20T16:59:05Z",
- "stats.timestamp": 1295543046.823504
+ "Auth": {
+ "queries.tcp": 1749,
+ "queries.udp": 867868
+ },
+ "Boss": {
+ "boot_time": "2011-01-20T16:59:03Z"
+ },
+ "Stats": {
+ "boot_time": "2011-01-20T16:59:05Z",
+ "last_update_time": "2011-01-20T17:04:05Z",
+ "lname": "4d3869d9_a at jreed.example.net",
+ "report_time": "2011-01-20T17:04:06Z",
+ "timestamp": 1295543046.823504
+ }
}
</pre><p>
- </p></div><div class="chapter" title="Chapter 14. Logging"><div class="titlepage"><div><div><h2 class="title"><a name="logging"></a>Chapter 14. Logging</h2></div></div></div><p>
- Each message written by BIND 10 to the configured logging destinations
- comprises a number of components that identify the origin of the
- message and, if the message indicates a problem, information about the
- problem that may be useful in fixing it.
- </p><p>
- Consider the message below logged to a file:
- </p><pre class="screen">2011-06-15 13:48:22.034 ERROR [b10-resolver.asiolink]
+ </p></div><div class="chapter" title="Chapter 14. Logging"><div class="titlepage"><div><div><h2 class="title"><a name="logging"></a>Chapter 14. Logging</h2></div></div></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="section"><a href="#id1168229447788">Logging configuration</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168229447799">Loggers</a></span></dt><dt><span class="section"><a href="#id1168229448040">Output Options</a></span></dt><dt><span class="section"><a href="#id1168229448215">Example session</a></span></dt></dl></dd><dt><span class="section"><a href="#id1168229448428">Logging Message Format</a></span></dt></dl></div><div class="section" title="Logging configuration"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229447788"></a>Logging configuration</h2></div></div></div><p>
+
+ The logging system in BIND 10 is configured through the
+ Logging module. All BIND 10 modules will look at the
+ configuration in Logging to see what should be logged and
+ to where.
+
+
+
+ </p><div class="section" title="Loggers"><div class="titlepage"><div><div><h3 class="title"><a name="id1168229447799"></a>Loggers</h3></div></div></div><p>
+
+ Within BIND 10, a message is logged through a component
+ called a "logger". Different parts of BIND 10 log messages
+ through different loggers, and each logger can be configured
+ independently of one another.
+
+ </p><p>
+
+ In the Logging module, you can specify the configuration
+ for zero or more loggers; any that are not specified will
+        take appropriate default values.
+
+ </p><p>
+
+ The three most important elements of a logger configuration
+ are the <code class="option">name</code> (the component that is
+ generating the messages), the <code class="option">severity</code>
+ (what to log), and the <code class="option">output_options</code>
+ (where to log).
+
+ </p><div class="section" title="name (string)"><div class="titlepage"><div><div><h4 class="title"><a name="id1168229447824"></a>name (string)</h4></div></div></div><p>
+ Each logger in the system has a name, the name being that
+ of the component using it to log messages. For instance,
+ if you want to configure logging for the resolver module,
+ you add an entry for a logger named <span class="quote">“<span class="quote">Resolver</span>”</span>. This
+ configuration will then be used by the loggers in the
+ Resolver module, and all the libraries used by it.
+ </p><p>
+
+ If you want to specify logging for one specific library
+ within the module, you set the name to
+ <em class="replaceable"><code>module.library</code></em>. For example, the
+ logger used by the nameserver address store component
+ has the full name of <span class="quote">“<span class="quote">Resolver.nsas</span>”</span>. If
+ there is no entry in Logging for a particular library,
+ it will use the configuration given for the module.
+
+
+
+ </p><p>
+
+
+
+ To illustrate this, suppose you want the cache library
+ to log messages of severity DEBUG, and the rest of the
+ resolver code to log messages of severity INFO. To achieve
+ this you specify two loggers, one with the name
+ <span class="quote">“<span class="quote">Resolver</span>”</span> and severity INFO, and one with
+ the name <span class="quote">“<span class="quote">Resolver.cache</span>”</span> with severity
+ DEBUG. As there are no entries for other libraries (e.g.
+ the nsas), they will use the configuration for the module
+ (<span class="quote">“<span class="quote">Resolver</span>”</span>), so giving the desired behavior.
+
+ </p><p>
+
+ One special case is that of a module name of <span class="quote">“<span class="quote">*</span>”</span>
+ (asterisks), which is interpreted as <span class="emphasis"><em>any</em></span>
+ module. You can set global logging options by using this,
+ including setting the logging configuration for a library
+ that is used by multiple modules (e.g. <span class="quote">“<span class="quote">*.config</span>”</span>
+ specifies the configuration library code in whatever
+ module is using it).
+
+ </p><p>
+
+ If there are multiple logger specifications in the
+ configuration that might match a particular logger, the
+ specification with the more specific logger name takes
+        precedence. For example, if there are entries for
+ both <span class="quote">“<span class="quote">*</span>”</span> and <span class="quote">“<span class="quote">Resolver</span>”</span>, the
+ resolver module — and all libraries it uses —
+ will log messages according to the configuration in the
+ second entry (<span class="quote">“<span class="quote">Resolver</span>”</span>). All other modules
+ will use the configuration of the first entry
+ (<span class="quote">“<span class="quote">*</span>”</span>). If there was also a configuration
+ entry for <span class="quote">“<span class="quote">Resolver.cache</span>”</span>, the cache library
+ within the resolver would use that in preference to the
+ entry for <span class="quote">“<span class="quote">Resolver</span>”</span>.
+
+ </p><p>
+
+ One final note about the naming. When specifying the
+ module name within a logger, use the name of the module
+ as specified in <span class="command"><strong>bindctl</strong></span>, e.g.
+ <span class="quote">“<span class="quote">Resolver</span>”</span> for the resolver module,
+ <span class="quote">“<span class="quote">Xfrout</span>”</span> for the xfrout module, etc. When
+ the message is logged, the message will include the name
+ of the logger generating the message, but with the module
+ name replaced by the name of the process implementing
+ the module (so for example, a message generated by the
+ <span class="quote">“<span class="quote">Auth.cache</span>”</span> logger will appear in the output
+ with a logger name of <span class="quote">“<span class="quote">b10-auth.cache</span>”</span>).
+
+ </p></div><div class="section" title="severity (string)"><div class="titlepage"><div><div><h4 class="title"><a name="id1168229447923"></a>severity (string)</h4></div></div></div><p>
+
+ This specifies the category of messages logged.
+ Each message is logged with an associated severity which
+ may be one of the following (in descending order of
+ severity):
+ </p><div class="itemizedlist"><ul class="itemizedlist" type="disc"><li class="listitem"> FATAL </li><li class="listitem"> ERROR </li><li class="listitem"> WARN </li><li class="listitem"> INFO </li><li class="listitem"> DEBUG </li></ul></div><p>
+
+ When the severity of a logger is set to one of these
+ values, it will only log messages of that severity, and
+ the severities above it. The severity may also be set to
+ NONE, in which case all messages from that logger are
+ inhibited.
+
+
+
+ </p></div><div class="section" title="output_options (list)"><div class="titlepage"><div><div><h4 class="title"><a name="id1168229447973"></a>output_options (list)</h4></div></div></div><p>
+
+ Each logger can have zero or more
+ <code class="option">output_options</code>. These specify where log
+ messages are sent to. These are explained in detail below.
+
+ </p><p>
+
+ The other options for a logger are:
+
+ </p></div><div class="section" title="debuglevel (integer)"><div class="titlepage"><div><div><h4 class="title"><a name="id1168229447990"></a>debuglevel (integer)</h4></div></div></div><p>
+
+ When a logger's severity is set to DEBUG, this value
+ specifies what debug messages should be printed. It ranges
+ from 0 (least verbose) to 99 (most verbose).
+ </p><p>
+
+ If severity for the logger is not DEBUG, this value is ignored.
+
+ </p></div><div class="section" title="additive (true or false)"><div class="titlepage"><div><div><h4 class="title"><a name="id1168229448005"></a>additive (true or false)</h4></div></div></div><p>
+
+ If this is true, the <code class="option">output_options</code> from
+ the parent will be used. For example, if there are two
+ loggers configured; <span class="quote">“<span class="quote">Resolver</span>”</span> and
+ <span class="quote">“<span class="quote">Resolver.cache</span>”</span>, and <code class="option">additive</code>
+ is true in the second, it will write the log messages
+ not only to the destinations specified for
+ <span class="quote">“<span class="quote">Resolver.cache</span>”</span>, but also to the destinations
+ as specified in the <code class="option">output_options</code> in
+ the logger named <span class="quote">“<span class="quote">Resolver</span>”</span>.
+
+
+
+ </p></div></div><div class="section" title="Output Options"><div class="titlepage"><div><div><h3 class="title"><a name="id1168229448040"></a>Output Options</h3></div></div></div><p>
+
+ The main settings for an output option are the
+ <code class="option">destination</code> and a value called
+ <code class="option">output</code>, the meaning of which depends on
+ the destination that is set.
+
+ </p><div class="section" title="destination (string)"><div class="titlepage"><div><div><h4 class="title"><a name="id1168229448056"></a>destination (string)</h4></div></div></div><p>
+
+ The destination is the type of output. It can be one of:
+
+ </p><div class="itemizedlist"><ul class="itemizedlist" type="disc"><li class="listitem"> console </li><li class="listitem"> file </li><li class="listitem"> syslog </li></ul></div></div><div class="section" title="output (string)"><div class="titlepage"><div><div><h4 class="title"><a name="id1168229448088"></a>output (string)</h4></div></div></div><p>
+
+ Depending on what is set as the output destination, this
+ value is interpreted as follows:
+
+ </p><div class="variablelist"><dl><dt><span class="term"><code class="option">destination</code> is <span class="quote">“<span class="quote">console</span>”</span></span></dt><dd>
+ The value of output must be one of <span class="quote">“<span class="quote">stdout</span>”</span>
+ (messages printed to standard output) or
+ <span class="quote">“<span class="quote">stderr</span>”</span> (messages printed to standard
+ error).
+ </dd><dt><span class="term"><code class="option">destination</code> is <span class="quote">“<span class="quote">file</span>”</span></span></dt><dd>
+ The value of output is interpreted as a file name;
+ log messages will be appended to this file.
+ </dd><dt><span class="term"><code class="option">destination</code> is <span class="quote">“<span class="quote">syslog</span>”</span></span></dt><dd>
+ The value of output is interpreted as the
+ <span class="command"><strong>syslog</strong></span> facility (e.g.
+ <span class="emphasis"><em>local0</em></span>) that should be used
+ for log messages.
+ </dd></dl></div><p>
+
+ The other options for <code class="option">output_options</code> are:
+
+      </p><div class="section" title="flush (true or false)"><div class="titlepage"><div><div><h5 class="title"><a name="id1168229448172"></a>flush (true or false)</h5></div></div></div><p>
+ Flush buffers after each log message. Doing this will
+ reduce performance but will ensure that if the program
+ terminates abnormally, all messages up to the point of
+ termination are output.
+ </p></div><div class="section" title="maxsize (integer)"><div class="titlepage"><div><div><h5 class="title"><a name="id1168229448182"></a>maxsize (integer)</h5></div></div></div><p>
+        Only relevant when destination is file, this is the maximum
+ file size of output files in bytes. When the maximum
+ size is reached, the file is renamed and a new file opened.
+ (For example, a ".1" is appended to the name —
+ if a ".1" file exists, it is renamed ".2",
+ etc.)
+ </p><p>
+ If this is 0, no maximum file size is used.
+ </p></div><div class="section" title="maxver (integer)"><div class="titlepage"><div><div><h5 class="title"><a name="id1168229448196"></a>maxver (integer)</h5></div></div></div><p>
+ Maximum number of old log files to keep around when
+ rolling the output file. Only relevant when
+ <code class="option">destination</code> is <span class="quote">“<span class="quote">file</span>”</span>.
+ </p></div></div></div><div class="section" title="Example session"><div class="titlepage"><div><div><h3 class="title"><a name="id1168229448215"></a>Example session</h3></div></div></div><p>
+
+ In this example we want to set the global logging to
+ write to the file <code class="filename">/var/log/my_bind10.log</code>,
+ at severity WARN. We want the authoritative server to
+ log at DEBUG with debuglevel 40, to a different file
+ (<code class="filename">/tmp/debug_messages</code>).
+
+ </p><p>
+
+ Start <span class="command"><strong>bindctl</strong></span>.
+
+ </p><p>
+
+ </p><pre class="screen">["login success "]
+> <strong class="userinput"><code>config show Logging</code></strong>
+Logging/loggers [] list
+</pre><p>
+
+ </p><p>
+
+ By default, no specific loggers are configured, in which
+ case the severity defaults to INFO and the output is
+ written to stderr.
+
+ </p><p>
+
+ Let's first add a default logger:
+
+ </p><p>
+
+ </p><pre class="screen"><strong class="userinput"><code>> config add Logging/loggers</code></strong>
+> <strong class="userinput"><code>config show Logging</code></strong>
+Logging/loggers/ list (modified)
+</pre><p>
+
+ </p><p>
+
+ The loggers value line changed to indicate that it is no
+ longer an empty list:
+
+ </p><p>
+
+ </p><pre class="screen">> <strong class="userinput"><code>config show Logging/loggers</code></strong>
+Logging/loggers[0]/name "" string (default)
+Logging/loggers[0]/severity "INFO" string (default)
+Logging/loggers[0]/debuglevel 0 integer (default)
+Logging/loggers[0]/additive false boolean (default)
+Logging/loggers[0]/output_options [] list (default)
+</pre><p>
+
+ </p><p>
+
+ The name is mandatory, so we must set it. We will also
+ change the severity. Let's start with the global
+ logger.
+
+ </p><p>
+
+ </p><pre class="screen">> <strong class="userinput"><code>config set Logging/loggers[0]/name *</code></strong>
+> <strong class="userinput"><code>config set Logging/loggers[0]/severity WARN</code></strong>
+> <strong class="userinput"><code>config show Logging/loggers</code></strong>
+Logging/loggers[0]/name "*" string (modified)
+Logging/loggers[0]/severity "WARN" string (modified)
+Logging/loggers[0]/debuglevel 0 integer (default)
+Logging/loggers[0]/additive false boolean (default)
+Logging/loggers[0]/output_options [] list (default)
+</pre><p>
+
+ </p><p>
+
+ Of course, we need to specify where we want the log
+ messages to go, so we add an entry for an output option.
+
+ </p><p>
+
+ </p><pre class="screen">> <strong class="userinput"><code> config add Logging/loggers[0]/output_options</code></strong>
+> <strong class="userinput"><code> config show Logging/loggers[0]/output_options</code></strong>
+Logging/loggers[0]/output_options[0]/destination "console" string (default)
+Logging/loggers[0]/output_options[0]/output "stdout" string (default)
+Logging/loggers[0]/output_options[0]/flush false boolean (default)
+Logging/loggers[0]/output_options[0]/maxsize 0 integer (default)
+Logging/loggers[0]/output_options[0]/maxver 0 integer (default)
+</pre><p>
+
+
+ </p><p>
+
+ These aren't the values we are looking for.
+
+ </p><p>
+
+ </p><pre class="screen">> <strong class="userinput"><code> config set Logging/loggers[0]/output_options[0]/destination file</code></strong>
+> <strong class="userinput"><code> config set Logging/loggers[0]/output_options[0]/output /var/log/bind10.log</code></strong>
+> <strong class="userinput"><code> config set Logging/loggers[0]/output_options[0]/maxsize 30000</code></strong>
+> <strong class="userinput"><code> config set Logging/loggers[0]/output_options[0]/maxver 8</code></strong>
+</pre><p>
+
+ </p><p>
+
+ Which would make the entire configuration for this logger
+ look like:
+
+ </p><p>
+
+ </p><pre class="screen">> <strong class="userinput"><code> config show all Logging/loggers</code></strong>
+Logging/loggers[0]/name "*" string (modified)
+Logging/loggers[0]/severity "WARN" string (modified)
+Logging/loggers[0]/debuglevel 0 integer (default)
+Logging/loggers[0]/additive false boolean (default)
+Logging/loggers[0]/output_options[0]/destination "file" string (modified)
+Logging/loggers[0]/output_options[0]/output "/var/log/bind10.log" string (modified)
+Logging/loggers[0]/output_options[0]/flush false boolean (default)
+Logging/loggers[0]/output_options[0]/maxsize 30000 integer (modified)
+Logging/loggers[0]/output_options[0]/maxver 8 integer (modified)
+</pre><p>
+
+ </p><p>
+
+ That looks OK, so let's commit it before we add the
+ configuration for the authoritative server's logger.
+
+ </p><p>
+
+ </p><pre class="screen">> <strong class="userinput"><code> config commit</code></strong></pre><p>
+
+ </p><p>
+
+ Now that we have set it, and checked each value along
+ the way, adding a second entry is quite similar.
+
+ </p><p>
+
+ </p><pre class="screen">> <strong class="userinput"><code> config add Logging/loggers</code></strong>
+> <strong class="userinput"><code> config set Logging/loggers[1]/name Auth</code></strong>
+> <strong class="userinput"><code> config set Logging/loggers[1]/severity DEBUG</code></strong>
+> <strong class="userinput"><code> config set Logging/loggers[1]/debuglevel 40</code></strong>
+> <strong class="userinput"><code> config add Logging/loggers[1]/output_options</code></strong>
+> <strong class="userinput"><code> config set Logging/loggers[1]/output_options[0]/destination file</code></strong>
+> <strong class="userinput"><code> config set Logging/loggers[1]/output_options[0]/output /tmp/auth_debug.log</code></strong>
+> <strong class="userinput"><code> config commit</code></strong>
+</pre><p>
+
+ </p><p>
+
+ And that's it. Once we have found whatever it was we
+ needed the debug messages for, we can simply remove the
+ second logger to let the authoritative server use the
+ same settings as the rest.
+
+ </p><p>
+
+ </p><pre class="screen">> <strong class="userinput"><code> config remove Logging/loggers[1]</code></strong>
+> <strong class="userinput"><code> config commit</code></strong>
+</pre><p>
+
+ </p><p>
+
+ And every module will now be using the values from the
+ logger named <span class="quote">“<span class="quote">*</span>”</span>.
+
+ </p></div></div><div class="section" title="Logging Message Format"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229448428"></a>Logging Message Format</h2></div></div></div><p>
+ Each message written by BIND 10 to the configured logging
+ destinations comprises a number of components that identify
+ the origin of the message and, if the message indicates
+ a problem, information about the problem that may be
+ useful in fixing it.
+ </p><p>
+ Consider the message below logged to a file:
+ </p><pre class="screen">2011-06-15 13:48:22.034 ERROR [b10-resolver.asiolink]
ASIODNS_OPENSOCK error 111 opening TCP socket to 127.0.0.1(53)</pre><p>
- </p><p>
- Note: the layout of messages written to the system logging
- file (syslog) may be slightly different. This message has
- been split across two lines here for display reasons; in the
- logging file, it will appear on one line.)
- </p><p>
- The log message comprises a number of components:
-
- </p><div class="variablelist"><dl><dt><span class="term">2011-06-15 13:48:22.034</span></dt><dd><p>
- The date and time at which the message was generated.
- </p></dd><dt><span class="term">ERROR</span></dt><dd><p>
- The severity of the message.
- </p></dd><dt><span class="term">[b10-resolver.asiolink]</span></dt><dd><p>
- The source of the message. This comprises two components:
- the BIND 10 process generating the message (in this
- case, <span class="command"><strong>b10-resolver</strong></span>) and the module
- within the program from which the message originated
- (which in the example is the asynchronous I/O link
- module, asiolink).
- </p></dd><dt><span class="term">ASIODNS_OPENSOCK</span></dt><dd><p>
+ </p><p>
+ (Note: the layout of messages written to the system logging
+ file (syslog) may be slightly different. This message has
+ been split across two lines here for display reasons; in the
+ logging file, it will appear on one line.)
+ </p><p>
+ The log message comprises a number of components:
+
+ </p><div class="variablelist"><dl><dt><span class="term">2011-06-15 13:48:22.034</span></dt><dd><p>
+ The date and time at which the message was generated.
+ </p></dd><dt><span class="term">ERROR</span></dt><dd><p>
+ The severity of the message.
+ </p></dd><dt><span class="term">[b10-resolver.asiolink]</span></dt><dd><p>
+ The source of the message. This comprises two components:
+ the BIND 10 process generating the message (in this
+ case, <span class="command"><strong>b10-resolver</strong></span>) and the module
+ within the program from which the message originated
+ (which in the example is the asynchronous I/O link
+ module, asiolink).
+ </p></dd><dt><span class="term">ASIODNS_OPENSOCK</span></dt><dd><p>
The message identification. Every message in BIND 10
has a unique identification, which can be used as an
index into the <a class="ulink" href="bind10-messages.html" target="_top"><em class="citetitle">BIND 10 Messages
Manual</em></a> (<a class="ulink" href="http://bind10.isc.org/docs/bind10-messages.html" target="_top">http://bind10.isc.org/docs/bind10-messages.html</a>) from which more information can be obtained.
- </p></dd><dt><span class="term">error 111 opening TCP socket to 127.0.0.1(53)</span></dt><dd><p>
- A brief description of the cause of the problem. Within this text,
- information relating to the condition that caused the message to
- be logged will be included. In this example, error number 111
- (an operating system-specific error number) was encountered when
- trying to open a TCP connection to port 53 on the local system
- (address 127.0.0.1). The next step would be to find out the reason
- for the failure by consulting your system's documentation to
- identify what error number 111 means.
- </p></dd></dl></div><p>
-
- </p></div></div></body></html>
+ </p></dd><dt><span class="term">error 111 opening TCP socket to 127.0.0.1(53)</span></dt><dd><p>
+ A brief description of the cause of the problem.
+ Within this text, information relating to the condition
+ that caused the message to be logged will be included.
+ In this example, error number 111 (an operating
+ system-specific error number) was encountered when
+ trying to open a TCP connection to port 53 on the
+ local system (address 127.0.0.1). The next step
+ would be to find out the reason for the failure by
+ consulting your system's documentation to identify
+ what error number 111 means.
+ </p></dd></dl></div><p>
+ </p></div></div></div></body></html>
diff --git a/doc/guide/bind10-guide.xml b/doc/guide/bind10-guide.xml
index ef66f3d..00ffee6 100644
--- a/doc/guide/bind10-guide.xml
+++ b/doc/guide/bind10-guide.xml
@@ -241,7 +241,7 @@
<section id="managing_once_running">
<title>Managing BIND 10</title>
-
+
<para>
Once BIND 10 is running, a few commands are used to interact
directly with the system:
@@ -280,7 +280,7 @@
<!-- TODO point to these -->
In addition, manual pages are also provided in the default installation.
</para>
-
+
<!--
bin/
bindctl*
@@ -387,7 +387,7 @@ Debian and Ubuntu:
</para>
<orderedlist>
-
+
<listitem>
<simpara>
Install required build dependencies.
@@ -471,7 +471,7 @@ Debian and Ubuntu:
Downloading a release tar file is the recommended method to
obtain the source code.
</para>
-
+
<para>
The BIND 10 releases are available as tar file downloads from
<ulink url="ftp://ftp.isc.org/isc/bind10/"/>.
@@ -550,34 +550,34 @@ Debian and Ubuntu:
<simpara>Define the installation location (the
default is <filename>/usr/local/</filename>).
</simpara>
- </listitem>
+ </listitem>
</varlistentry>
<varlistentry>
<term>--with-boost-include</term>
- <listitem>
+ <listitem>
<simpara>Define the path to find the Boost headers.
</simpara>
- </listitem>
+ </listitem>
</varlistentry>
<varlistentry>
<term>--with-pythonpath</term>
- <listitem>
+ <listitem>
<simpara>Define the path to Python 3.1 if it is not in the
standard execution path.
</simpara>
- </listitem>
+ </listitem>
</varlistentry>
<varlistentry>
<term>--with-gtest</term>
- <listitem>
+ <listitem>
<simpara>Enable building the C++ Unit Tests using the
Google Tests framework. Optionally this can define the
path to the gtest header files and library.
</simpara>
- </listitem>
+ </listitem>
</varlistentry>
</variablelist>
@@ -696,13 +696,13 @@ Debian and Ubuntu:
</para>
</section>
-->
-
+
</chapter>
<chapter id="bind10">
<title>Starting BIND10 with <command>bind10</command></title>
<para>
- BIND 10 provides the <command>bind10</command> command which
+ BIND 10 provides the <command>bind10</command> command which
starts up the required processes.
<command>bind10</command>
will also restart processes that exit unexpectedly.
@@ -711,7 +711,7 @@ Debian and Ubuntu:
<para>
After starting the <command>b10-msgq</command> communications channel,
- <command>bind10</command> connects to it,
+ <command>bind10</command> connects to it,
runs the configuration manager, and reads its own configuration.
Then it starts the other modules.
</para>
@@ -779,7 +779,7 @@ Debian and Ubuntu:
<command>b10-msgq</command> service.
It listens on 127.0.0.1.
</para>
-
+
<!-- TODO: this is broken, see Trac #111
<para>
To select an alternate port for the <command>b10-msgq</command> to
@@ -1105,10 +1105,10 @@ since we used bind10 -->
The configuration data item is:
<variablelist>
-
+
<varlistentry>
<term>database_file</term>
- <listitem>
+ <listitem>
<simpara>This is an optional string to define the path to find
the SQLite3 database file.
<!-- TODO: -->
@@ -1130,7 +1130,7 @@ This may be a temporary setting until then.
<varlistentry>
<term>shutdown</term>
- <listitem>
+ <listitem>
<simpara>Stop the authoritative DNS server.
</simpara>
<!-- TODO: what happens when this is sent, will bind10 restart? -->
@@ -1186,7 +1186,7 @@ This may be a temporary setting until then.
<varlistentry>
<term>$INCLUDE</term>
- <listitem>
+ <listitem>
<simpara>Loads an additional zone file. This may be recursive.
</simpara>
</listitem>
@@ -1194,7 +1194,7 @@ This may be a temporary setting until then.
<varlistentry>
<term>$ORIGIN</term>
- <listitem>
+ <listitem>
<simpara>Defines the relative domain name.
</simpara>
</listitem>
@@ -1202,7 +1202,7 @@ This may be a temporary setting until then.
<varlistentry>
<term>$TTL</term>
- <listitem>
+ <listitem>
<simpara>Defines the time-to-live value used for following
records that don't include a TTL.
</simpara>
@@ -1267,7 +1267,7 @@ TODO
<note><simpara>
The current development release of BIND 10 only supports
- AXFR. (IXFR is not supported.)
+ AXFR. (IXFR is not supported.)
<!-- TODO: sqlite3 data source only? -->
@@ -1314,7 +1314,7 @@ what if a NOTIFY is sent?
<note><simpara>
The current development release of BIND 10 only supports
- AXFR. (IXFR is not supported.)
+ AXFR. (IXFR is not supported.)
Access control is not yet provided.
</simpara></note>
@@ -1370,7 +1370,7 @@ what is XfroutClient xfr_client??
<para>
The main <command>bind10</command> process can be configured
- to select to run either the authoritative or resolver.
+ to select to run either the authoritative or resolver or both.
By default, it starts the authoritative service.
<!-- TODO: later both -->
@@ -1390,16 +1390,85 @@ what is XfroutClient xfr_client??
</para>
<para>
- The resolver also needs to be configured to listen on an address
- and port:
+ By default, the resolver listens on port 53 on 127.0.0.1 and ::1.
+ The following example shows how it can be configured to
+ listen on an additional address (and port):
<screen>
-> <userinput>config set Resolver/listen_on [{ "address": "127.0.0.1", "port": 53 }]</userinput>
+> <userinput>config add Resolver/listen_on</userinput>
+> <userinput>config set Resolver/listen_on[<replaceable>2</replaceable>]/address "192.168.1.1"</userinput>
+> <userinput>config set Resolver/listen_on[<replaceable>2</replaceable>]/port 53</userinput>
> <userinput>config commit</userinput>
</screen>
</para>
-<!-- TODO: later the above will have some defaults -->
+ <simpara>(Replace the <quote><replaceable>2</replaceable></quote>
+ as needed; run <quote><userinput>config show
+ Resolver/listen_on</userinput></quote> if needed.)</simpara>
+<!-- TODO: this example should not include the port, ticket #1185 -->
+
+ <section>
+ <title>Access Control</title>
+
+ <para>
+ By default, the <command>b10-resolver</command> daemon only accepts
+ DNS queries from the localhost (127.0.0.1 and ::1).
+ The <option>Resolver/query_acl</option> configuration may
+ be used to reject, drop, or allow specific IPs or networks.
+ This configuration list is checked in order; the first match applies.
+ </para>
+
+ <para>
+ The configuration's <option>action</option> item may be
+ set to <quote>ACCEPT</quote> to allow the incoming query,
+ <quote>REJECT</quote> to respond with a DNS REFUSED return
+ code, or <quote>DROP</quote> to ignore the query without
+ any response (such as a blackhole). For more information,
+ see the respective debugging messages: <ulink
+ url="bind10-messages.html#RESOLVER_QUERY_ACCEPTED">RESOLVER_QUERY_ACCEPTED</ulink>,
+ <ulink
+ url="bind10-messages.html#RESOLVER_QUERY_REJECTED">RESOLVER_QUERY_REJECTED</ulink>,
+ and <ulink
+url="bind10-messages.html#RESOLVER_QUERY_DROPPED">RESOLVER_QUERY_DROPPED</ulink>.
+ </para>
+
+ <para>
+ The required configuration's <option>from</option> item is set
+ to an IPv4 or IPv6 address, addresses with a network mask, or to
+ the special lowercase keywords <quote>any6</quote> (for
+ any IPv6 address) or <quote>any4</quote> (for any IPv4
+ address).
+ </para>
+
+<!-- TODO:
+/0 is for any address in that address family
+does that need any address too?
+
+TODO: tsig
+-->
+
+ <para>
+ For example, to allow the <replaceable>192.168.1.0/24</replaceable>
+ network to use your recursive name server, at the
+ <command>bindctl</command> prompt run:
+ </para>
+
+ <screen>
+> <userinput>config add Resolver/query_acl</userinput>
+> <userinput>config set Resolver/query_acl[<replaceable>2</replaceable>]/action "ACCEPT"</userinput>
+> <userinput>config set Resolver/query_acl[<replaceable>2</replaceable>]/from "<replaceable>192.168.1.0/24</replaceable>"</userinput>
+> <userinput>config commit</userinput>
+</screen>
+
+ <simpara>(Replace the <quote><replaceable>2</replaceable></quote>
+ as needed; run <quote><userinput>config show
+ Resolver/query_acl</userinput></quote> if needed.)</simpara>
+
+<!-- TODO: check this -->
+ <note><simpara>This prototype access control configuration
+ syntax may be changed.</simpara></note>
+
+ </section>
<section>
<title>Forwarding</title>
@@ -1453,24 +1522,30 @@ then change those defaults with config set Resolver/forward_addresses[0]/address
<para>
- This stats daemon provides commands to identify if it is running,
- show specified or all statistics data, set values, remove data,
- and reset data.
+ This stats daemon provides commands to identify if it is
+ running, show specified or all statistics data, show specified
+ or all statistics data schema, and set specified statistics
+ data.
For example, using <command>bindctl</command>:
<screen>
> <userinput>Stats show</userinput>
{
- "auth.queries.tcp": 1749,
- "auth.queries.udp": 867868,
- "bind10.boot_time": "2011-01-20T16:59:03Z",
- "report_time": "2011-01-20T17:04:06Z",
- "stats.boot_time": "2011-01-20T16:59:05Z",
- "stats.last_update_time": "2011-01-20T17:04:05Z",
- "stats.lname": "4d3869d9_a at jreed.example.net",
- "stats.start_time": "2011-01-20T16:59:05Z",
- "stats.timestamp": 1295543046.823504
+ "Auth": {
+ "queries.tcp": 1749,
+ "queries.udp": 867868
+ },
+ "Boss": {
+ "boot_time": "2011-01-20T16:59:03Z"
+ },
+ "Stats": {
+ "boot_time": "2011-01-20T16:59:05Z",
+ "last_update_time": "2011-01-20T17:04:05Z",
+ "lname": "4d3869d9_a at jreed.example.net",
+ "report_time": "2011-01-20T17:04:06Z",
+ "timestamp": 1295543046.823504
+ }
}
</screen>
</para>
diff --git a/doc/guide/bind10-messages.html b/doc/guide/bind10-messages.html
index b075e96..237b7ad 100644
--- a/doc/guide/bind10-messages.html
+++ b/doc/guide/bind10-messages.html
@@ -1,10 +1,10 @@
-<html><head><meta http-equiv="Content-Type" content="text/html; charset=ISO-8859-1"><title>BIND 10 Messages Manual</title><link rel="stylesheet" href="./bind10-guide.css" type="text/css"><meta name="generator" content="DocBook XSL Stylesheets V1.75.2"><meta name="description" content="BIND 10 is a Domain Name System (DNS) suite managed by Internet Systems Consortium (ISC). It includes DNS libraries and modular components for controlling authoritative and recursive DNS servers. This is the messages manual for BIND 10 version 20110519. The most up-to-date version of this document, along with other documents for BIND 10, can be found at ."></head><body bgcolor="white" text="black" link="#0000FF" vlink="#840084" alink="#0000FF"><div class="book" title="BIND 10 Messages Manual"><div class="titlepage"><div><div><h1 class="title"><a name="id1168230298903"></a>BIND 10 Messages Manual</h1></div><div><p class="releaseinfo">This is the messages manual for BIND 10 version
- 20110519.</p></div><div><p class="copyright">Copyright © 2011 Internet Systems Consortium, Inc.</p></div><div><div class="abstract" title="Abstract"><p class="title"><b>Abstract</b></p><p>BIND 10 is a Domain Name System (DNS) suite managed by
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=ISO-8859-1"><title>BIND 10 Messages Manual</title><link rel="stylesheet" href="./bind10-guide.css" type="text/css"><meta name="generator" content="DocBook XSL Stylesheets V1.75.2"><meta name="description" content="BIND 10 is a Domain Name System (DNS) suite managed by Internet Systems Consortium (ISC). It includes DNS libraries and modular components for controlling authoritative and recursive DNS servers. This is the messages manual for BIND 10 version 20110809. The most up-to-date version of this document, along with other documents for BIND 10, can be found at ."></head><body bgcolor="white" text="black" link="#0000FF" vlink="#840084" alink="#0000FF"><div class="book" title="BIND 10 Messages Manual"><div class="titlepage"><div><div><h1 class="title"><a name="id1168229460045"></a>BIND 10 Messages Manual</h1></div><div><p class="releaseinfo">This is the messages manual for BIND 10 version
+ 20110809.</p></div><div><p class="copyright">Copyright © 2011 Internet Systems Consortium, Inc.</p></div><div><div class="abstract" title="Abstract"><p class="title"><b>Abstract</b></p><p>BIND 10 is a Domain Name System (DNS) suite managed by
Internet Systems Consortium (ISC). It includes DNS libraries
and modular components for controlling authoritative and
recursive DNS servers.
</p><p>
- This is the messages manual for BIND 10 version 20110519.
+ This is the messages manual for BIND 10 version 20110809.
The most up-to-date version of this document, along with
other documents for BIND 10, can be found at
<a class="ulink" href="http://bind10.isc.org/docs" target="_top">http://bind10.isc.org/docs</a>.
@@ -26,38 +26,635 @@
For information on configuring and using BIND 10 logging,
refer to the <a class="ulink" href="bind10-guide.html" target="_top">BIND 10 Guide</a>.
</p></div><div class="chapter" title="Chapter 2. BIND 10 Messages"><div class="titlepage"><div><div><h2 class="title"><a name="messages"></a>Chapter 2. BIND 10 Messages</h2></div></div></div><p>
- </p><div class="variablelist"><dl><dt><a name="ASIODNS_FETCHCOMP"></a><span class="term">ASIODNS_FETCHCOMP upstream fetch to %1(%2) has now completed</span></dt><dd><p>
-A debug message, this records the the upstream fetch (a query made by the
+ </p><div class="variablelist"><dl><dt><a name="ASIODNS_FETCH_COMPLETED"></a><span class="term">ASIODNS_FETCH_COMPLETED upstream fetch to %1(%2) has now completed</span></dt><dd><p>
+A debug message, this records that the upstream fetch (a query made by the
resolver on behalf of its client) to the specified address has completed.
-</p></dd><dt><a name="ASIODNS_FETCHSTOP"></a><span class="term">ASIODNS_FETCHSTOP upstream fetch to %1(%2) has been stopped</span></dt><dd><p>
+</p></dd><dt><a name="ASIODNS_FETCH_STOPPED"></a><span class="term">ASIODNS_FETCH_STOPPED upstream fetch to %1(%2) has been stopped</span></dt><dd><p>
An external component has requested the halting of an upstream fetch. This
is an allowed operation, and the message should only appear if debug is
enabled.
-</p></dd><dt><a name="ASIODNS_OPENSOCK"></a><span class="term">ASIODNS_OPENSOCK error %1 opening %2 socket to %3(%4)</span></dt><dd><p>
+</p></dd><dt><a name="ASIODNS_OPEN_SOCKET"></a><span class="term">ASIODNS_OPEN_SOCKET error %1 opening %2 socket to %3(%4)</span></dt><dd><p>
The asynchronous I/O code encountered an error when trying to open a socket
of the specified protocol in order to send a message to the target address.
-The the number of the system error that cause the problem is given in the
+The number of the system error that caused the problem is given in the
message.
-</p></dd><dt><a name="ASIODNS_RECVSOCK"></a><span class="term">ASIODNS_RECVSOCK error %1 reading %2 data from %3(%4)</span></dt><dd><p>
-The asynchronous I/O code encountered an error when trying read data from
-the specified address on the given protocol. The the number of the system
-error that cause the problem is given in the message.
-</p></dd><dt><a name="ASIODNS_RECVTMO"></a><span class="term">ASIODNS_RECVTMO receive timeout while waiting for data from %1(%2)</span></dt><dd><p>
+</p></dd><dt><a name="ASIODNS_READ_DATA"></a><span class="term">ASIODNS_READ_DATA error %1 reading %2 data from %3(%4)</span></dt><dd><p>
+The asynchronous I/O code encountered an error when trying to read data from
+the specified address on the given protocol. The number of the system
+error that caused the problem is given in the message.
+</p></dd><dt><a name="ASIODNS_READ_TIMEOUT"></a><span class="term">ASIODNS_READ_TIMEOUT receive timeout while waiting for data from %1(%2)</span></dt><dd><p>
An upstream fetch from the specified address timed out. This may happen for
any number of reasons and is most probably a problem at the remote server
or a problem on the network. The message will only appear if debug is
enabled.
-</p></dd><dt><a name="ASIODNS_SENDSOCK"></a><span class="term">ASIODNS_SENDSOCK error %1 sending data using %2 to %3(%4)</span></dt><dd><p>
-The asynchronous I/O code encountered an error when trying send data to
-the specified address on the given protocol. The the number of the system
-error that cause the problem is given in the message.
-</p></dd><dt><a name="ASIODNS_UNKORIGIN"></a><span class="term">ASIODNS_UNKORIGIN unknown origin for ASIO error code %1 (protocol: %2, address %3)</span></dt><dd><p>
-This message should not appear and indicates an internal error if it does.
-Please enter a bug report.
-</p></dd><dt><a name="ASIODNS_UNKRESULT"></a><span class="term">ASIODNS_UNKRESULT unknown result (%1) when IOFetch::stop() was executed for I/O to %2(%3)</span></dt><dd><p>
-The termination method of the resolver's upstream fetch class was called with
-an unknown result code (which is given in the message). This message should
-not appear and may indicate an internal error. Please enter a bug report.
+</p></dd><dt><a name="ASIODNS_SEND_DATA"></a><span class="term">ASIODNS_SEND_DATA error %1 sending data using %2 to %3(%4)</span></dt><dd><p>
+The asynchronous I/O code encountered an error when trying to send data to
+the specified address on the given protocol. The number of the system
+error that caused the problem is given in the message.
+</p></dd><dt><a name="ASIODNS_UNKNOWN_ORIGIN"></a><span class="term">ASIODNS_UNKNOWN_ORIGIN unknown origin for ASIO error code %1 (protocol: %2, address %3)</span></dt><dd><p>
+An internal consistency check on the origin of a message from the
+asynchronous I/O module failed. This may indicate an internal error;
+please submit a bug report.
+</p></dd><dt><a name="ASIODNS_UNKNOWN_RESULT"></a><span class="term">ASIODNS_UNKNOWN_RESULT unknown result (%1) when IOFetch::stop() was executed for I/O to %2(%3)</span></dt><dd><p>
+An internal error indicating that the termination method of the resolver's
+upstream fetch class was called with an unknown result code (which is
+given in the message). Please submit a bug report.
+</p></dd><dt><a name="AUTH_AXFR_ERROR"></a><span class="term">AUTH_AXFR_ERROR error handling AXFR request: %1</span></dt><dd><p>
+This is a debug message produced by the authoritative server when it
+has encountered an error processing an AXFR request. The message gives
+the reason for the error, and the server will return a SERVFAIL code to
+the sender.
+</p></dd><dt><a name="AUTH_AXFR_UDP"></a><span class="term">AUTH_AXFR_UDP AXFR query received over UDP</span></dt><dd><p>
+This is a debug message output when the authoritative server has received
+an AXFR query over UDP. Use of UDP for AXFRs is not permitted by the
+protocol, so the server will return a FORMERR error to the sender.
+</p></dd><dt><a name="AUTH_COMMAND_FAILED"></a><span class="term">AUTH_COMMAND_FAILED execution of command channel instruction '%1' failed: %2</span></dt><dd><p>
+Execution of the specified command by the authoritative server failed. The
+message contains the reason for the failure.
+</p></dd><dt><a name="AUTH_CONFIG_CHANNEL_CREATED"></a><span class="term">AUTH_CONFIG_CHANNEL_CREATED configuration session channel created</span></dt><dd><p>
+This is a debug message indicating that the authoritative server has
+created the channel to the configuration manager. It is issued during
+server startup as an indication that the initialization is proceeding
+normally.
+</p></dd><dt><a name="AUTH_CONFIG_CHANNEL_ESTABLISHED"></a><span class="term">AUTH_CONFIG_CHANNEL_ESTABLISHED configuration session channel established</span></dt><dd><p>
+This is a debug message indicating that the authoritative server
+has established communication with the configuration manager over the
+previously-created channel. It is issued during server startup as an
+indication that the initialization is proceeding normally.
+</p></dd><dt><a name="AUTH_CONFIG_CHANNEL_STARTED"></a><span class="term">AUTH_CONFIG_CHANNEL_STARTED configuration session channel started</span></dt><dd><p>
+This is a debug message, issued when the authoritative server has
+posted a request to be notified when new configuration information is
+available. It is issued during server startup as an indication that
+the initialization is proceeding normally.
+</p></dd><dt><a name="AUTH_CONFIG_LOAD_FAIL"></a><span class="term">AUTH_CONFIG_LOAD_FAIL load of configuration failed: %1</span></dt><dd><p>
+An attempt to configure the server with information from the configuration
+database during the startup sequence has failed. (The reason for
+the failure is given in the message.) The server will continue its
+initialization although it may not be configured in the desired way.
+</p></dd><dt><a name="AUTH_CONFIG_UPDATE_FAIL"></a><span class="term">AUTH_CONFIG_UPDATE_FAIL update of configuration failed: %1</span></dt><dd><p>
+An attempt to update the configuration of the server with information
+from the configuration database has failed, the reason being given in
+the message.
+</p></dd><dt><a name="AUTH_DATA_SOURCE"></a><span class="term">AUTH_DATA_SOURCE data source database file: %1</span></dt><dd><p>
+This is a debug message produced by the authoritative server when it accesses a
+database data source, listing the file that is being accessed.
+</p></dd><dt><a name="AUTH_DNS_SERVICES_CREATED"></a><span class="term">AUTH_DNS_SERVICES_CREATED DNS services created</span></dt><dd><p>
+This is a debug message indicating that the component that will handle
+incoming queries for the authoritative server (DNSServices) has been
+successfully created. It is issued during server startup as an
+indication that the initialization is proceeding normally.
+</p></dd><dt><a name="AUTH_HEADER_PARSE_FAIL"></a><span class="term">AUTH_HEADER_PARSE_FAIL unable to parse header in received DNS packet: %1</span></dt><dd><p>
+This is a debug message, generated by the authoritative server when an
+attempt to parse the header of a received DNS packet has failed. (The
+reason for the failure is given in the message.) The server will drop the
+packet.
+</p></dd><dt><a name="AUTH_LOAD_TSIG"></a><span class="term">AUTH_LOAD_TSIG loading TSIG keys</span></dt><dd><p>
+This is a debug message indicating that the authoritative server
+has requested the keyring holding TSIG keys from the configuration
+database. It is issued during server startup as an indication that the
+initialization is proceeding normally.
+</p></dd><dt><a name="AUTH_LOAD_ZONE"></a><span class="term">AUTH_LOAD_ZONE loaded zone %1/%2</span></dt><dd><p>
+This debug message is issued during the processing of the 'loadzone' command
+when the authoritative server has successfully loaded the named zone of the
+named class.
+</p></dd><dt><a name="AUTH_MEM_DATASRC_DISABLED"></a><span class="term">AUTH_MEM_DATASRC_DISABLED memory data source is disabled for class %1</span></dt><dd><p>
+This is a debug message reporting that the authoritative server has
+discovered that the memory data source is disabled for the given class.
+</p></dd><dt><a name="AUTH_MEM_DATASRC_ENABLED"></a><span class="term">AUTH_MEM_DATASRC_ENABLED memory data source is enabled for class %1</span></dt><dd><p>
+This is a debug message reporting that the authoritative server has
+discovered that the memory data source is enabled for the given class.
+</p></dd><dt><a name="AUTH_NOTIFY_QUESTIONS"></a><span class="term">AUTH_NOTIFY_QUESTIONS invalid number of questions (%1) in incoming NOTIFY</span></dt><dd><p>
+This debug message is logged by the authoritative server when it receives
+a NOTIFY packet that contains zero or more than one question. (A valid
+NOTIFY packet contains one question.) The server will return a FORMERR
+error to the sender.
+</p></dd><dt><a name="AUTH_NOTIFY_RRTYPE"></a><span class="term">AUTH_NOTIFY_RRTYPE invalid question RR type (%1) in incoming NOTIFY</span></dt><dd><p>
+This debug message is logged by the authoritative server when it receives
+a NOTIFY packet that has an RR type of something other than SOA in the
+question section. (The RR type received is included in the message.) The
+server will return a FORMERR error to the sender.
+</p></dd><dt><a name="AUTH_NO_STATS_SESSION"></a><span class="term">AUTH_NO_STATS_SESSION session interface for statistics is not available</span></dt><dd><p>
+The authoritative server had no session with the statistics module at the
+time it attempted to send it data: the attempt has been abandoned. This
+could be an error in configuration.
+</p></dd><dt><a name="AUTH_NO_XFRIN"></a><span class="term">AUTH_NO_XFRIN received NOTIFY but XFRIN session is not running</span></dt><dd><p>
+This is a debug message produced by the authoritative server when it receives
+a NOTIFY packet but the XFRIN process is not running. The packet will be
+dropped and nothing returned to the sender.
+</p></dd><dt><a name="AUTH_PACKET_PARSE_ERROR"></a><span class="term">AUTH_PACKET_PARSE_ERROR unable to parse received DNS packet: %1</span></dt><dd><p>
+This is a debug message, generated by the authoritative server when an
+attempt to parse a received DNS packet has failed due to something other
+than a protocol error. The reason for the failure is given in the message;
+the server will return a SERVFAIL error code to the sender.
+</p></dd><dt><a name="AUTH_PACKET_PROTOCOL_ERROR"></a><span class="term">AUTH_PACKET_PROTOCOL_ERROR DNS packet protocol error: %1. Returning %2</span></dt><dd><p>
+This is a debug message, generated by the authoritative server when an
+attempt to parse a received DNS packet has failed due to a protocol error.
+The reason for the failure is given in the message, as is the error code
+that will be returned to the sender.
+</p></dd><dt><a name="AUTH_PACKET_RECEIVED"></a><span class="term">AUTH_PACKET_RECEIVED message received:\n%1</span></dt><dd><p>
+This is a debug message output by the authoritative server when it
+receives a valid DNS packet.
+</p><p>
+Note: This message includes the packet received, rendered in the form of
+multiple lines of text. For this reason, it is suggested that this log message
+not be routed to the syslog file, where the multiple lines could confuse
+programs that expect a format of one message per line.
+</p></dd><dt><a name="AUTH_PROCESS_FAIL"></a><span class="term">AUTH_PROCESS_FAIL message processing failure: %1</span></dt><dd><p>
+This message is generated by the authoritative server when it has
+encountered an internal error whilst processing a received packet:
+the cause of the error is included in the message.
+</p><p>
+The server will return a SERVFAIL error code to the sender of the packet.
+This message indicates a potential error in the server. Please open a
+bug ticket for this issue.
+</p></dd><dt><a name="AUTH_RECEIVED_COMMAND"></a><span class="term">AUTH_RECEIVED_COMMAND command '%1' received</span></dt><dd><p>
+This is a debug message issued when the authoritative server has received
+a command on the command channel.
+</p></dd><dt><a name="AUTH_RECEIVED_SENDSTATS"></a><span class="term">AUTH_RECEIVED_SENDSTATS command 'sendstats' received</span></dt><dd><p>
+This is a debug message issued when the authoritative server has received
+a command from the statistics module to send it data. The 'sendstats'
+command is handled differently to other commands, which is why the debug
+message associated with it has its own code.
+</p></dd><dt><a name="AUTH_RESPONSE_RECEIVED"></a><span class="term">AUTH_RESPONSE_RECEIVED received response message, ignoring</span></dt><dd><p>
+This debug message is output if the authoritative server
+receives a DNS packet with the QR bit set, i.e. a DNS response. The
+server ignores the packet as it only responds to question packets.
+</p></dd><dt><a name="AUTH_SEND_ERROR_RESPONSE"></a><span class="term">AUTH_SEND_ERROR_RESPONSE sending an error response (%1 bytes):\n%2</span></dt><dd><p>
+This is a debug message recording that the authoritative server is sending
+an error response to the originator of the query. A previous message will
+have recorded details of the failure.
+</p><p>
+Note: This message includes the packet sent, rendered in the form of
+multiple lines of text. For this reason, it is suggested that this log message
+not be routed to the syslog file, where the multiple lines could confuse
+programs that expect a format of one message per line.
+</p></dd><dt><a name="AUTH_SEND_NORMAL_RESPONSE"></a><span class="term">AUTH_SEND_NORMAL_RESPONSE sending an error response (%1 bytes):\n%2</span></dt><dd><p>
+This is a debug message recording that the authoritative server is sending
+a response to the originator of a query.
+</p><p>
+Note: This message includes the packet sent, rendered in the form of
+multiple lines of text. For this reason, it is suggested that this log message
+not be routed to the syslog file, where the multiple lines could confuse
+programs that expect a format of one message per line.
+</p></dd><dt><a name="AUTH_SERVER_CREATED"></a><span class="term">AUTH_SERVER_CREATED server created</span></dt><dd><p>
+An informational message indicating that the authoritative server process has
+been created and is initializing. The AUTH_SERVER_STARTED message will be
+output when initialization has successfully completed and the server starts
+accepting queries.
+</p></dd><dt><a name="AUTH_SERVER_FAILED"></a><span class="term">AUTH_SERVER_FAILED server failed: %1</span></dt><dd><p>
+The authoritative server has encountered a fatal error and is terminating. The
+reason for the failure is included in the message.
+</p></dd><dt><a name="AUTH_SERVER_STARTED"></a><span class="term">AUTH_SERVER_STARTED server started</span></dt><dd><p>
+Initialization of the authoritative server has completed successfully
+and it is entering the main loop, waiting for queries to arrive.
+</p></dd><dt><a name="AUTH_SQLITE3"></a><span class="term">AUTH_SQLITE3 nothing to do for loading sqlite3</span></dt><dd><p>
+This is a debug message indicating that the authoritative server has
+found that the data source it is loading is an SQLite3 data source,
+so no further validation is needed.
+</p></dd><dt><a name="AUTH_STATS_CHANNEL_CREATED"></a><span class="term">AUTH_STATS_CHANNEL_CREATED STATS session channel created</span></dt><dd><p>
+This is a debug message indicating that the authoritative server has
+created a channel to the statistics process. It is issued during server
+startup as an indication that the initialization is proceeding normally.
+</p></dd><dt><a name="AUTH_STATS_CHANNEL_ESTABLISHED"></a><span class="term">AUTH_STATS_CHANNEL_ESTABLISHED STATS session channel established</span></dt><dd><p>
+This is a debug message indicating that the authoritative server
+has established communication over the previously created statistics
+channel. It is issued during server startup as an indication that the
+initialization is proceeding normally.
+</p></dd><dt><a name="AUTH_STATS_COMMS"></a><span class="term">AUTH_STATS_COMMS communication error in sending statistics data: %1</span></dt><dd><p>
+An error was encountered when the authoritative server tried to send data
+to the statistics daemon. The message includes additional information
+describing the reason for the failure.
+</p></dd><dt><a name="AUTH_STATS_TIMEOUT"></a><span class="term">AUTH_STATS_TIMEOUT timeout while sending statistics data: %1</span></dt><dd><p>
+The authoritative server sent data to the statistics daemon but received
+no acknowledgement within the specified time. The message includes
+additional information describing the reason for the failure.
+</p></dd><dt><a name="AUTH_STATS_TIMER_DISABLED"></a><span class="term">AUTH_STATS_TIMER_DISABLED statistics timer has been disabled</span></dt><dd><p>
+This is a debug message indicating that the statistics timer has been
+disabled in the authoritative server and no statistics information is
+being produced.
+</p></dd><dt><a name="AUTH_STATS_TIMER_SET"></a><span class="term">AUTH_STATS_TIMER_SET statistics timer set to %1 second(s)</span></dt><dd><p>
+This is a debug message indicating that the statistics timer has been
+enabled and that the authoritative server will produce statistics data
+at the specified interval.
+</p></dd><dt><a name="AUTH_UNSUPPORTED_OPCODE"></a><span class="term">AUTH_UNSUPPORTED_OPCODE unsupported opcode: %1</span></dt><dd><p>
+This is a debug message, produced when a received DNS packet being
+processed by the authoritative server has been found to contain an
+unsupported opcode. (The opcode is included in the message.) The server
+will return an error code of NOTIMPL to the sender.
+</p></dd><dt><a name="AUTH_XFRIN_CHANNEL_CREATED"></a><span class="term">AUTH_XFRIN_CHANNEL_CREATED XFRIN session channel created</span></dt><dd><p>
+This is a debug message indicating that the authoritative server has
+created a channel to the XFRIN (Transfer-in) process. It is issued
+during server startup as an indication that the initialization is
+proceeding normally.
+</p></dd><dt><a name="AUTH_XFRIN_CHANNEL_ESTABLISHED"></a><span class="term">AUTH_XFRIN_CHANNEL_ESTABLISHED XFRIN session channel established</span></dt><dd><p>
+This is a debug message indicating that the authoritative server has
+established communication over the previously-created channel to the
+XFRIN (Transfer-in) process. It is issued during server startup as an
+indication that the initialization is proceeding normally.
+</p></dd><dt><a name="AUTH_ZONEMGR_COMMS"></a><span class="term">AUTH_ZONEMGR_COMMS error communicating with zone manager: %1</span></dt><dd><p>
+This is a debug message output during the processing of a NOTIFY request.
+An error (listed in the message) has been encountered whilst communicating
+with the zone manager. The NOTIFY request will not be honored.
+</p></dd><dt><a name="AUTH_ZONEMGR_ERROR"></a><span class="term">AUTH_ZONEMGR_ERROR received error response from zone manager: %1</span></dt><dd><p>
+This is a debug message output during the processing of a NOTIFY
+request. The zone manager component has been informed of the request,
+but has returned an error response (which is included in the message). The
+NOTIFY request will not be honored.
+</p></dd><dt><a name="BIND10_CHECK_MSGQ_ALREADY_RUNNING"></a><span class="term">BIND10_CHECK_MSGQ_ALREADY_RUNNING checking if msgq is already running</span></dt><dd><p>
+The boss process is starting up and will now check if the message bus
+daemon is already running. If so, it will not be able to start, as it
+needs a dedicated message bus.
+</p></dd><dt><a name="BIND10_CONFIGURATION_START_AUTH"></a><span class="term">BIND10_CONFIGURATION_START_AUTH start authoritative server: %1</span></dt><dd><p>
+This message shows whether or not the authoritative server should be
+started according to the configuration.
+</p></dd><dt><a name="BIND10_CONFIGURATION_START_RESOLVER"></a><span class="term">BIND10_CONFIGURATION_START_RESOLVER start resolver: %1</span></dt><dd><p>
+This message shows whether or not the resolver should be
+started according to the configuration.
+</p></dd><dt><a name="BIND10_INVALID_USER"></a><span class="term">BIND10_INVALID_USER invalid user: %1</span></dt><dd><p>
+The boss process was started with the -u option, to drop root privileges
+and continue running as the specified user, but the user is unknown.
+</p></dd><dt><a name="BIND10_KILLING_ALL_PROCESSES"></a><span class="term">BIND10_KILLING_ALL_PROCESSES killing all started processes</span></dt><dd><p>
+The boss module was not able to start every process it needed to start
+during startup, and will now kill the processes that did get started.
+</p></dd><dt><a name="BIND10_KILL_PROCESS"></a><span class="term">BIND10_KILL_PROCESS killing process %1</span></dt><dd><p>
+The boss module is sending a kill signal to process with the given name,
+as part of the process of killing all started processes during a failed
+startup, as described for BIND10_KILLING_ALL_PROCESSES
+</p></dd><dt><a name="BIND10_MSGQ_ALREADY_RUNNING"></a><span class="term">BIND10_MSGQ_ALREADY_RUNNING msgq daemon already running, cannot start</span></dt><dd><p>
+There already appears to be a message bus daemon running. Either an
+old process was not shut down correctly, and needs to be killed, or
+another instance of BIND10, with the same msgq domain socket, is
+running, which needs to be stopped.
+</p></dd><dt><a name="BIND10_MSGQ_DAEMON_ENDED"></a><span class="term">BIND10_MSGQ_DAEMON_ENDED b10-msgq process died, shutting down</span></dt><dd><p>
+The message bus daemon has died. This is a fatal error, since it may
+leave the system in an inconsistent state. BIND10 will now shut down.
+</p></dd><dt><a name="BIND10_MSGQ_DISAPPEARED"></a><span class="term">BIND10_MSGQ_DISAPPEARED msgq channel disappeared</span></dt><dd><p>
+While listening on the message bus channel for messages, it suddenly
+disappeared. The msgq daemon may have died. This might lead to an
+inconsistent state of the system, and BIND 10 will now shut down.
+</p></dd><dt><a name="BIND10_PROCESS_ENDED_NO_EXIT_STATUS"></a><span class="term">BIND10_PROCESS_ENDED_NO_EXIT_STATUS process %1 (PID %2) died: exit status not available</span></dt><dd><p>
+The given process ended unexpectedly, but no exit status is
+available. See BIND10_PROCESS_ENDED_WITH_EXIT_STATUS for a longer
+description.
+</p></dd><dt><a name="BIND10_PROCESS_ENDED_WITH_EXIT_STATUS"></a><span class="term">BIND10_PROCESS_ENDED_WITH_EXIT_STATUS process %1 (PID %2) terminated, exit status = %3</span></dt><dd><p>
+The given process ended unexpectedly with the given exit status.
+Depending on which module it was, it may simply be restarted, or it
+may be a problem that will cause the boss module to shut down too.
+The latter happens if it was the message bus daemon, which, if it has
+died suddenly, may leave the system in an inconsistent state. BIND10
+will also shut down now if it has been run with --brittle.
+</p></dd><dt><a name="BIND10_READING_BOSS_CONFIGURATION"></a><span class="term">BIND10_READING_BOSS_CONFIGURATION reading boss configuration</span></dt><dd><p>
+The boss process is starting up, and will now process the initial
+configuration, as received from the configuration manager.
+</p></dd><dt><a name="BIND10_RECEIVED_COMMAND"></a><span class="term">BIND10_RECEIVED_COMMAND received command: %1</span></dt><dd><p>
+The boss module received a command and shall now process it. The command
+is printed.
+</p></dd><dt><a name="BIND10_RECEIVED_NEW_CONFIGURATION"></a><span class="term">BIND10_RECEIVED_NEW_CONFIGURATION received new configuration: %1</span></dt><dd><p>
+The boss module received a configuration update and is going to apply
+it now. The new configuration is printed.
+</p></dd><dt><a name="BIND10_RECEIVED_SIGNAL"></a><span class="term">BIND10_RECEIVED_SIGNAL received signal %1</span></dt><dd><p>
+The boss module received the given signal.
+</p></dd><dt><a name="BIND10_RESURRECTED_PROCESS"></a><span class="term">BIND10_RESURRECTED_PROCESS resurrected %1 (PID %2)</span></dt><dd><p>
+The given process has been restarted successfully, and is now running
+with the given process id.
+</p></dd><dt><a name="BIND10_RESURRECTING_PROCESS"></a><span class="term">BIND10_RESURRECTING_PROCESS resurrecting dead %1 process...</span></dt><dd><p>
+The given process has ended unexpectedly, and is now restarted.
+</p></dd><dt><a name="BIND10_SELECT_ERROR"></a><span class="term">BIND10_SELECT_ERROR error in select() call: %1</span></dt><dd><p>
+There was a fatal error in the call to select(), used to see if a child
+process has ended or if there is a message on the message bus. This
+should not happen under normal circumstances and is considered fatal,
+so BIND 10 will now shut down. The specific error is printed.
+</p></dd><dt><a name="BIND10_SEND_SIGKILL"></a><span class="term">BIND10_SEND_SIGKILL sending SIGKILL to %1 (PID %2)</span></dt><dd><p>
+The boss module is sending a SIGKILL signal to the given process.
+</p></dd><dt><a name="BIND10_SEND_SIGTERM"></a><span class="term">BIND10_SEND_SIGTERM sending SIGTERM to %1 (PID %2)</span></dt><dd><p>
+The boss module is sending a SIGTERM signal to the given process.
+</p></dd><dt><a name="BIND10_SHUTDOWN"></a><span class="term">BIND10_SHUTDOWN stopping the server</span></dt><dd><p>
+The boss process received a command or signal telling it to shut down.
+It will send a shutdown command to each process. The processes that do
+not shut down will then receive a SIGTERM signal. If that doesn't work,
+it shall send SIGKILL signals to the processes still alive.
+</p></dd><dt><a name="BIND10_SHUTDOWN_COMPLETE"></a><span class="term">BIND10_SHUTDOWN_COMPLETE all processes ended, shutdown complete</span></dt><dd><p>
+All child processes have been stopped, and the boss process will now
+stop itself.
+</p></dd><dt><a name="BIND10_SOCKCREATOR_BAD_CAUSE"></a><span class="term">BIND10_SOCKCREATOR_BAD_CAUSE unknown error cause from socket creator: %1</span></dt><dd><p>
+The socket creator reported an error when creating a socket. But the function
+which failed is unknown (not one of 'S' for socket or 'B' for bind).
+</p></dd><dt><a name="BIND10_SOCKCREATOR_BAD_RESPONSE"></a><span class="term">BIND10_SOCKCREATOR_BAD_RESPONSE unknown response for socket request: %1</span></dt><dd><p>
+The boss requested a socket from the creator, but the answer is unknown. This
+looks like a programmer error.
+</p></dd><dt><a name="BIND10_SOCKCREATOR_CRASHED"></a><span class="term">BIND10_SOCKCREATOR_CRASHED the socket creator crashed</span></dt><dd><p>
+The socket creator terminated unexpectedly. It is not possible to restart it
+(because the boss already gave up root privileges), so the system is going
+to terminate.
+</p></dd><dt><a name="BIND10_SOCKCREATOR_EOF"></a><span class="term">BIND10_SOCKCREATOR_EOF eof while expecting data from socket creator</span></dt><dd><p>
+There should be more data from the socket creator, but it closed the socket.
+It probably crashed.
+</p></dd><dt><a name="BIND10_SOCKCREATOR_INIT"></a><span class="term">BIND10_SOCKCREATOR_INIT initializing socket creator parser</span></dt><dd><p>
+The boss module initializes routines for parsing the socket creator
+protocol.
+</p></dd><dt><a name="BIND10_SOCKCREATOR_KILL"></a><span class="term">BIND10_SOCKCREATOR_KILL killing the socket creator</span></dt><dd><p>
+The socket creator is being terminated the aggressive way, by sending it
+SIGKILL. This should not happen usually.
+</p></dd><dt><a name="BIND10_SOCKCREATOR_TERMINATE"></a><span class="term">BIND10_SOCKCREATOR_TERMINATE terminating socket creator</span></dt><dd><p>
+The boss module sends a request to terminate to the socket creator.
+</p></dd><dt><a name="BIND10_SOCKCREATOR_TRANSPORT_ERROR"></a><span class="term">BIND10_SOCKCREATOR_TRANSPORT_ERROR transport error when talking to the socket creator: %1</span></dt><dd><p>
+Either sending or receiving data from the socket creator failed with the given
+error. The creator probably crashed or some serious OS-level problem happened,
+as the communication happens only on local host.
+</p></dd><dt><a name="BIND10_SOCKET_CREATED"></a><span class="term">BIND10_SOCKET_CREATED successfully created socket %1</span></dt><dd><p>
+The socket creator successfully created and sent a requested socket, it has
+the given file number.
+</p></dd><dt><a name="BIND10_SOCKET_ERROR"></a><span class="term">BIND10_SOCKET_ERROR error on %1 call in the creator: %2/%3</span></dt><dd><p>
+The socket creator failed to create the requested socket. It failed on the
+indicated OS API function with given error.
+</p></dd><dt><a name="BIND10_SOCKET_GET"></a><span class="term">BIND10_SOCKET_GET requesting socket [%1]:%2 of type %3 from the creator</span></dt><dd><p>
+The boss forwards a request for a socket to the socket creator.
+</p></dd><dt><a name="BIND10_STARTED_PROCESS"></a><span class="term">BIND10_STARTED_PROCESS started %1</span></dt><dd><p>
+The given process has successfully been started.
+</p></dd><dt><a name="BIND10_STARTED_PROCESS_PID"></a><span class="term">BIND10_STARTED_PROCESS_PID started %1 (PID %2)</span></dt><dd><p>
+The given process has successfully been started, and has the given PID.
+</p></dd><dt><a name="BIND10_STARTING"></a><span class="term">BIND10_STARTING starting BIND10: %1</span></dt><dd><p>
+Informational message on startup that shows the full version.
+</p></dd><dt><a name="BIND10_STARTING_PROCESS"></a><span class="term">BIND10_STARTING_PROCESS starting process %1</span></dt><dd><p>
+The boss module is starting the given process.
+</p></dd><dt><a name="BIND10_STARTING_PROCESS_PORT"></a><span class="term">BIND10_STARTING_PROCESS_PORT starting process %1 (to listen on port %2)</span></dt><dd><p>
+The boss module is starting the given process, which will listen on the
+given port number.
+</p></dd><dt><a name="BIND10_STARTING_PROCESS_PORT_ADDRESS"></a><span class="term">BIND10_STARTING_PROCESS_PORT_ADDRESS starting process %1 (to listen on %2#%3)</span></dt><dd><p>
+The boss module is starting the given process, which will listen on the
+given address and port number (written as &lt;address&gt;#&lt;port&gt;).
+</p></dd><dt><a name="BIND10_STARTUP_COMPLETE"></a><span class="term">BIND10_STARTUP_COMPLETE BIND 10 started</span></dt><dd><p>
+All modules have been successfully started, and BIND 10 is now running.
+</p></dd><dt><a name="BIND10_STARTUP_ERROR"></a><span class="term">BIND10_STARTUP_ERROR error during startup: %1</span></dt><dd><p>
+There was a fatal error when BIND10 was trying to start. The error is
+shown, and BIND10 will now shut down.
+</p></dd><dt><a name="BIND10_START_AS_NON_ROOT"></a><span class="term">BIND10_START_AS_NON_ROOT starting %1 as a user, not root. This might fail.</span></dt><dd><p>
+The given module is being started or restarted without root privileges.
+If the module needs these privileges, it may have problems starting.
+Note that this issue should be resolved by the pending 'socket-creator'
+process; once that has been implemented, modules should not need root
+privileges anymore. See tickets #800 and #801 for more information.
+</p></dd><dt><a name="BIND10_STOP_PROCESS"></a><span class="term">BIND10_STOP_PROCESS asking %1 to shut down</span></dt><dd><p>
+The boss module is sending a shutdown command to the given module over
+the message channel.
+</p></dd><dt><a name="BIND10_UNKNOWN_CHILD_PROCESS_ENDED"></a><span class="term">BIND10_UNKNOWN_CHILD_PROCESS_ENDED unknown child pid %1 exited</span></dt><dd><p>
+An unknown child process has exited. The PID is printed, but no further
+action will be taken by the boss process.
+</p></dd><dt><a name="CACHE_ENTRY_MISSING_RRSET"></a><span class="term">CACHE_ENTRY_MISSING_RRSET missing RRset to generate message for %1</span></dt><dd><p>
+The cache tried to generate the complete answer message. It knows the structure
+of the message, but some of the RRsets to be put there are not in cache (they
+probably expired already). Therefore it pretends the message was not found.
+</p></dd><dt><a name="CACHE_LOCALZONE_FOUND"></a><span class="term">CACHE_LOCALZONE_FOUND found entry with key %1 in local zone data</span></dt><dd><p>
+Debug message, noting that the requested data was successfully found in the
+local zone data of the cache.
+</p></dd><dt><a name="CACHE_LOCALZONE_UNKNOWN"></a><span class="term">CACHE_LOCALZONE_UNKNOWN entry with key %1 not found in local zone data</span></dt><dd><p>
+Debug message. The requested data was not found in the local zone data.
+</p></dd><dt><a name="CACHE_LOCALZONE_UPDATE"></a><span class="term">CACHE_LOCALZONE_UPDATE updating local zone element at key %1</span></dt><dd><p>
+Debug message issued when there's update to the local zone section of cache.
+</p></dd><dt><a name="CACHE_MESSAGES_DEINIT"></a><span class="term">CACHE_MESSAGES_DEINIT deinitialized message cache</span></dt><dd><p>
+Debug message. It is issued when the server deinitializes the message cache.
+</p></dd><dt><a name="CACHE_MESSAGES_EXPIRED"></a><span class="term">CACHE_MESSAGES_EXPIRED found an expired message entry for %1 in the message cache</span></dt><dd><p>
+Debug message. The requested data was found in the message cache, but it
+already expired. Therefore the cache removes the entry and pretends it found
+nothing.
+</p></dd><dt><a name="CACHE_MESSAGES_FOUND"></a><span class="term">CACHE_MESSAGES_FOUND found a message entry for %1 in the message cache</span></dt><dd><p>
+Debug message. We found the whole message in the cache, so it can be returned
+to user without any other lookups.
+</p></dd><dt><a name="CACHE_MESSAGES_INIT"></a><span class="term">CACHE_MESSAGES_INIT initialized message cache for %1 messages of class %2</span></dt><dd><p>
+Debug message issued when a new message cache is created. It lists the class
+of messages it can hold and the maximum size of the cache.
+</p></dd><dt><a name="CACHE_MESSAGES_REMOVE"></a><span class="term">CACHE_MESSAGES_REMOVE removing old instance of %1/%2/%3 first</span></dt><dd><p>
+Debug message. This may follow CACHE_MESSAGES_UPDATE and indicates that, while
+updating, the old instance is being removed prior to inserting a new one.
+</p></dd><dt><a name="CACHE_MESSAGES_UNCACHEABLE"></a><span class="term">CACHE_MESSAGES_UNCACHEABLE not inserting uncacheable message %1/%2/%3</span></dt><dd><p>
+Debug message, noting that the given message can not be cached. This is because
+there's no SOA record in the message. See RFC 2308 section 5 for more
+information.
+</p></dd><dt><a name="CACHE_MESSAGES_UNKNOWN"></a><span class="term">CACHE_MESSAGES_UNKNOWN no entry for %1 found in the message cache</span></dt><dd><p>
+Debug message. The message cache didn't find any entry for the given key.
+</p></dd><dt><a name="CACHE_MESSAGES_UPDATE"></a><span class="term">CACHE_MESSAGES_UPDATE updating message entry %1/%2/%3</span></dt><dd><p>
+Debug message issued when the message cache is being updated with a new
+message. Either the old instance is removed or, if none is found, a new one
+is created.
+</p></dd><dt><a name="CACHE_RESOLVER_DEEPEST"></a><span class="term">CACHE_RESOLVER_DEEPEST looking up deepest NS for %1/%2</span></dt><dd><p>
+Debug message. The resolver cache is looking up the deepest known nameserver,
+so the resolution doesn't have to start from the root.
+</p></dd><dt><a name="CACHE_RESOLVER_INIT"></a><span class="term">CACHE_RESOLVER_INIT initializing resolver cache for class %1</span></dt><dd><p>
+Debug message. The resolver cache is being created for this given class.
+</p></dd><dt><a name="CACHE_RESOLVER_INIT_INFO"></a><span class="term">CACHE_RESOLVER_INIT_INFO initializing resolver cache for class %1</span></dt><dd><p>
+Debug message, the resolver cache is being created for this given class. The
+difference from CACHE_RESOLVER_INIT is only in different format of passed
+information, otherwise it does the same.
+</p></dd><dt><a name="CACHE_RESOLVER_LOCAL_MSG"></a><span class="term">CACHE_RESOLVER_LOCAL_MSG message for %1/%2 found in local zone data</span></dt><dd><p>
+Debug message. The resolver cache found a complete message for the user query
+in the zone data.
+</p></dd><dt><a name="CACHE_RESOLVER_LOCAL_RRSET"></a><span class="term">CACHE_RESOLVER_LOCAL_RRSET RRset for %1/%2 found in local zone data</span></dt><dd><p>
+Debug message. The resolver cache found a requested RRset in the local zone
+data.
+</p></dd><dt><a name="CACHE_RESOLVER_LOOKUP_MSG"></a><span class="term">CACHE_RESOLVER_LOOKUP_MSG looking up message in resolver cache for %1/%2</span></dt><dd><p>
+Debug message. The resolver cache is trying to find a message to answer the
+user query.
+</p></dd><dt><a name="CACHE_RESOLVER_LOOKUP_RRSET"></a><span class="term">CACHE_RESOLVER_LOOKUP_RRSET looking up RRset in resolver cache for %1/%2</span></dt><dd><p>
+Debug message. The resolver cache is trying to find an RRset (which usually
+originates internally from the resolver).
+</p></dd><dt><a name="CACHE_RESOLVER_NO_QUESTION"></a><span class="term">CACHE_RESOLVER_NO_QUESTION answer message for %1/%2 has empty question section</span></dt><dd><p>
+The cache tried to fill in found data into the response message. But it
+discovered the message contains no question section, which is invalid.
+This is likely a programmer error, please submit a bug report.
+</p></dd><dt><a name="CACHE_RESOLVER_UNKNOWN_CLASS_MSG"></a><span class="term">CACHE_RESOLVER_UNKNOWN_CLASS_MSG no cache for class %1</span></dt><dd><p>
+Debug message. While trying to lookup a message in the resolver cache, it was
+discovered there's no cache for this class at all. Therefore no message is
+found.
+</p></dd><dt><a name="CACHE_RESOLVER_UNKNOWN_CLASS_RRSET"></a><span class="term">CACHE_RESOLVER_UNKNOWN_CLASS_RRSET no cache for class %1</span></dt><dd><p>
+Debug message. While trying to lookup an RRset in the resolver cache, it was
+discovered there's no cache for this class at all. Therefore no data is found.
+</p></dd><dt><a name="CACHE_RESOLVER_UPDATE_MSG"></a><span class="term">CACHE_RESOLVER_UPDATE_MSG updating message for %1/%2/%3</span></dt><dd><p>
+Debug message. The resolver is updating a message in the cache.
+</p></dd><dt><a name="CACHE_RESOLVER_UPDATE_RRSET"></a><span class="term">CACHE_RESOLVER_UPDATE_RRSET updating RRset for %1/%2/%3</span></dt><dd><p>
+Debug message. The resolver is updating an RRset in the cache.
+</p></dd><dt><a name="CACHE_RESOLVER_UPDATE_UNKNOWN_CLASS_MSG"></a><span class="term">CACHE_RESOLVER_UPDATE_UNKNOWN_CLASS_MSG no cache for class %1</span></dt><dd><p>
+Debug message. While trying to insert a message into the cache, it was
+discovered that there's no cache for the class of message. Therefore
+the message will not be cached.
+</p></dd><dt><a name="CACHE_RESOLVER_UPDATE_UNKNOWN_CLASS_RRSET"></a><span class="term">CACHE_RESOLVER_UPDATE_UNKNOWN_CLASS_RRSET no cache for class %1</span></dt><dd><p>
+Debug message. While trying to insert an RRset into the cache, it was
+discovered that there's no cache for the class of the RRset. Therefore
+the message will not be cached.
+</p></dd><dt><a name="CACHE_RRSET_EXPIRED"></a><span class="term">CACHE_RRSET_EXPIRED found expired RRset %1/%2/%3</span></dt><dd><p>
+Debug message. The requested data was found in the RRset cache. However, it is
+expired, so the cache removed it and is going to pretend nothing was found.
+</p></dd><dt><a name="CACHE_RRSET_INIT"></a><span class="term">CACHE_RRSET_INIT initializing RRset cache for %1 RRsets of class %2</span></dt><dd><p>
+Debug message. The RRset cache to hold at most this many RRsets for the given
+class is being created.
+</p></dd><dt><a name="CACHE_RRSET_LOOKUP"></a><span class="term">CACHE_RRSET_LOOKUP looking up %1/%2/%3 in RRset cache</span></dt><dd><p>
+Debug message. The resolver is trying to look up data in the RRset cache.
+</p></dd><dt><a name="CACHE_RRSET_NOT_FOUND"></a><span class="term">CACHE_RRSET_NOT_FOUND no RRset found for %1/%2/%3</span></dt><dd><p>
+Debug message which can follow CACHE_RRSET_LOOKUP. This means the data is not
+in the cache.
+</p></dd><dt><a name="CACHE_RRSET_REMOVE_OLD"></a><span class="term">CACHE_RRSET_REMOVE_OLD removing old RRset for %1/%2/%3 to make space for new one</span></dt><dd><p>
+Debug message which can follow CACHE_RRSET_UPDATE. During the update, the cache
+removed an old instance of the RRset to replace it with the new one.
+</p></dd><dt><a name="CACHE_RRSET_UNTRUSTED"></a><span class="term">CACHE_RRSET_UNTRUSTED not replacing old RRset for %1/%2/%3, it has higher trust level</span></dt><dd><p>
+Debug message which can follow CACHE_RRSET_UPDATE. The cache already holds the
+same RRset, but from more trusted source, so the old one is kept and new one
+ignored.
+</p></dd><dt><a name="CACHE_RRSET_UPDATE"></a><span class="term">CACHE_RRSET_UPDATE updating RRset %1/%2/%3 in the cache</span></dt><dd><p>
+Debug message. The RRset is updating its data with this given RRset.
+</p></dd><dt><a name="CC_ASYNC_READ_FAILED"></a><span class="term">CC_ASYNC_READ_FAILED asynchronous read failed</span></dt><dd><p>
+This marks a low level error, we tried to read data from the message queue
+daemon asynchronously, but the ASIO library returned an error.
+</p></dd><dt><a name="CC_CONN_ERROR"></a><span class="term">CC_CONN_ERROR error connecting to message queue (%1)</span></dt><dd><p>
+It is impossible to reach the message queue daemon for the reason given. It
+is unlikely there'll be reason for whatever program this currently is to
+continue running, as the communication with the rest of BIND 10 is vital
+for the components.
+</p></dd><dt><a name="CC_DISCONNECT"></a><span class="term">CC_DISCONNECT disconnecting from message queue daemon</span></dt><dd><p>
+The library is disconnecting from the message queue daemon. This debug message
+indicates that the program is trying to shut down gracefully.
+</p></dd><dt><a name="CC_ESTABLISH"></a><span class="term">CC_ESTABLISH trying to establish connection with message queue daemon at %1</span></dt><dd><p>
+This debug message indicates that the command channel library is about to
+connect to the message queue daemon, which should be listening on the UNIX-domain
+socket listed in the output.
+</p></dd><dt><a name="CC_ESTABLISHED"></a><span class="term">CC_ESTABLISHED successfully connected to message queue daemon</span></dt><dd><p>
+This debug message indicates that the connection was successfully made, this
+should follow CC_ESTABLISH.
+</p></dd><dt><a name="CC_GROUP_RECEIVE"></a><span class="term">CC_GROUP_RECEIVE trying to receive a message</span></dt><dd><p>
+Debug message, noting that a message is expected to come over the command
+channel.
+</p></dd><dt><a name="CC_GROUP_RECEIVED"></a><span class="term">CC_GROUP_RECEIVED message arrived ('%1', '%2')</span></dt><dd><p>
+Debug message, noting that we successfully received a message (its envelope and
+payload listed). This follows CC_GROUP_RECEIVE, but might happen some time
+later, depending if we waited for it or just polled.
+</p></dd><dt><a name="CC_GROUP_SEND"></a><span class="term">CC_GROUP_SEND sending message '%1' to group '%2'</span></dt><dd><p>
+Debug message, we're about to send a message over the command channel.
+</p></dd><dt><a name="CC_INVALID_LENGTHS"></a><span class="term">CC_INVALID_LENGTHS invalid length parameters (%1, %2)</span></dt><dd><p>
+This happens when garbage comes over the command channel or some kind of
+confusion happens in the program. The data received from the socket makes no
+sense if we interpret it as the lengths of a message. The first one is total length
+of the message; the second is the length of the header. The header
+and its length (2 bytes) are counted in the total length.
+</p></dd><dt><a name="CC_LENGTH_NOT_READY"></a><span class="term">CC_LENGTH_NOT_READY length not ready</span></dt><dd><p>
+There should be data representing the length of message on the socket, but it
+is not there.
+</p></dd><dt><a name="CC_NO_MESSAGE"></a><span class="term">CC_NO_MESSAGE no message ready to be received yet</span></dt><dd><p>
+The program polled for incoming messages, but there was no message waiting.
+This is a debug message which may happen only after CC_GROUP_RECEIVE.
+</p></dd><dt><a name="CC_NO_MSGQ"></a><span class="term">CC_NO_MSGQ unable to connect to message queue (%1)</span></dt><dd><p>
+It isn't possible to connect to the message queue daemon, for the reason listed.
+It is unlikely any program will be able to continue without the communication.
+</p></dd><dt><a name="CC_READ_ERROR"></a><span class="term">CC_READ_ERROR error reading data from command channel (%1)</span></dt><dd><p>
+A low level error happened when the library tried to read data from the
+command channel socket. The reason is listed.
+</p></dd><dt><a name="CC_READ_EXCEPTION"></a><span class="term">CC_READ_EXCEPTION error reading data from command channel (%1)</span></dt><dd><p>
+We received an exception while trying to read data from the command
+channel socket. The reason is listed.
+</p></dd><dt><a name="CC_REPLY"></a><span class="term">CC_REPLY replying to message from '%1' with '%2'</span></dt><dd><p>
+Debug message, noting we're sending a response to the original message
+with the given envelope.
+</p></dd><dt><a name="CC_SET_TIMEOUT"></a><span class="term">CC_SET_TIMEOUT setting timeout to %1ms</span></dt><dd><p>
+Debug message. A timeout for which the program is willing to wait for a reply
+is being set.
+</p></dd><dt><a name="CC_START_READ"></a><span class="term">CC_START_READ starting asynchronous read</span></dt><dd><p>
+Debug message. From now on, when a message (or command) comes, it'll wake the
+program and the library will automatically pass it over to correct place.
+</p></dd><dt><a name="CC_SUBSCRIBE"></a><span class="term">CC_SUBSCRIBE subscribing to communication group %1</span></dt><dd><p>
+Debug message. The program wants to receive messages addressed to this group.
+</p></dd><dt><a name="CC_TIMEOUT"></a><span class="term">CC_TIMEOUT timeout reading data from command channel</span></dt><dd><p>
+The program waited too long for data from the command channel (usually when it
+sent a query to different program and it didn't answer for whatever reason).
+</p></dd><dt><a name="CC_UNSUBSCRIBE"></a><span class="term">CC_UNSUBSCRIBE unsubscribing from communication group %1</span></dt><dd><p>
+Debug message. The program no longer wants to receive messages addressed to
+this group.
+</p></dd><dt><a name="CC_WRITE_ERROR"></a><span class="term">CC_WRITE_ERROR error writing data to command channel (%1)</span></dt><dd><p>
+A low level error happened when the library tried to write data to the command
+channel socket.
+</p></dd><dt><a name="CC_ZERO_LENGTH"></a><span class="term">CC_ZERO_LENGTH invalid message length (0)</span></dt><dd><p>
+The library received a message length being zero, which makes no sense, since
+all messages must contain at least the envelope.
+</p></dd><dt><a name="CFGMGR_AUTOMATIC_CONFIG_DATABASE_UPDATE"></a><span class="term">CFGMGR_AUTOMATIC_CONFIG_DATABASE_UPDATE Updating configuration database from version %1 to %2</span></dt><dd><p>
+An older version of the configuration database has been found, from which
+there was an automatic upgrade path to the current version. These changes
+are now applied, and no action from the administrator is necessary.
+</p></dd><dt><a name="CFGMGR_BAD_UPDATE_RESPONSE_FROM_MODULE"></a><span class="term">CFGMGR_BAD_UPDATE_RESPONSE_FROM_MODULE Unable to parse response from module %1: %2</span></dt><dd><p>
+The configuration manager sent a configuration update to a module, but
+the module responded with an answer that could not be parsed. The answer
+message appears to be invalid JSON data, or not decodable to a string.
+This is likely to be a problem in the module in question. The update is
+assumed to have failed, and will not be stored.
+</p></dd><dt><a name="CFGMGR_CC_SESSION_ERROR"></a><span class="term">CFGMGR_CC_SESSION_ERROR Error connecting to command channel: %1</span></dt><dd><p>
+The configuration manager daemon was unable to connect to the messaging
+system. The most likely cause is that msgq is not running.
+</p></dd><dt><a name="CFGMGR_DATA_READ_ERROR"></a><span class="term">CFGMGR_DATA_READ_ERROR error reading configuration database from disk: %1</span></dt><dd><p>
+There was a problem reading the persistent configuration data as stored
+on disk. The file may be corrupted, or it is of a version from where
+there is no automatic upgrade path. The file needs to be repaired or
+removed. The configuration manager daemon will now shut down.
+</p></dd><dt><a name="CFGMGR_IOERROR_WHILE_WRITING_CONFIGURATION"></a><span class="term">CFGMGR_IOERROR_WHILE_WRITING_CONFIGURATION Unable to write configuration file; configuration not stored: %1</span></dt><dd><p>
+There was an IO error from the system while the configuration manager
+was trying to write the configuration database to disk. The specific
+error is given. The most likely cause is that the directory where
+the file is stored does not exist, or is not writable. The updated
+configuration is not stored.
+</p></dd><dt><a name="CFGMGR_OSERROR_WHILE_WRITING_CONFIGURATION"></a><span class="term">CFGMGR_OSERROR_WHILE_WRITING_CONFIGURATION Unable to write configuration file; configuration not stored: %1</span></dt><dd><p>
+There was an OS error from the system while the configuration manager
+was trying to write the configuration database to disk. The specific
+error is given. The most likely cause is that the system does not have
+write access to the configuration database file. The updated
+configuration is not stored.
+</p></dd><dt><a name="CFGMGR_STOPPED_BY_KEYBOARD"></a><span class="term">CFGMGR_STOPPED_BY_KEYBOARD keyboard interrupt, shutting down</span></dt><dd><p>
+There was a keyboard interrupt signal to stop the cfgmgr daemon. The
+daemon will now shut down.
+</p></dd><dt><a name="CMDCTL_BAD_CONFIG_DATA"></a><span class="term">CMDCTL_BAD_CONFIG_DATA error in config data: %1</span></dt><dd><p>
+There was an error reading the updated configuration data. The specific
+error is printed.
+</p></dd><dt><a name="CMDCTL_BAD_PASSWORD"></a><span class="term">CMDCTL_BAD_PASSWORD bad password for user: %1</span></dt><dd><p>
+A login attempt was made to b10-cmdctl, but the password was wrong.
+Users can be managed with the tool b10-cmdctl-usermgr.
+</p></dd><dt><a name="CMDCTL_CC_SESSION_ERROR"></a><span class="term">CMDCTL_CC_SESSION_ERROR error reading from cc channel: %1</span></dt><dd><p>
+There was a problem reading from the command and control channel. The
+most likely cause is that the message bus daemon is not running.
+</p></dd><dt><a name="CMDCTL_CC_SESSION_TIMEOUT"></a><span class="term">CMDCTL_CC_SESSION_TIMEOUT timeout on cc channel</span></dt><dd><p>
+A timeout occurred when waiting for essential data from the cc session.
+This usually occurs when b10-cfgmgr is not running or not responding.
+Since we are waiting for essential information, this is a fatal error,
+and the cmdctl daemon will now shut down.
+</p></dd><dt><a name="CMDCTL_COMMAND_ERROR"></a><span class="term">CMDCTL_COMMAND_ERROR error in command %1 to module %2: %3</span></dt><dd><p>
+An error was encountered sending the given command to the given module.
+Either there was a communication problem with the module, or the module
+was not able to process the command, and sent back an error. The
+specific error is printed in the message.
+</p></dd><dt><a name="CMDCTL_COMMAND_SENT"></a><span class="term">CMDCTL_COMMAND_SENT command '%1' to module '%2' was sent</span></dt><dd><p>
+This debug message indicates that the given command has been sent to
+the given module.
+</p></dd><dt><a name="CMDCTL_NO_SUCH_USER"></a><span class="term">CMDCTL_NO_SUCH_USER username not found in user database: %1</span></dt><dd><p>
+A login attempt was made to b10-cmdctl, but the username was not known.
+Users can be added with the tool b10-cmdctl-usermgr.
+</p></dd><dt><a name="CMDCTL_NO_USER_ENTRIES_READ"></a><span class="term">CMDCTL_NO_USER_ENTRIES_READ failed to read user information, all users will be denied</span></dt><dd><p>
+The b10-cmdctl daemon was unable to find any user data in the user
+database file. Either it was unable to read the file (in which case
+this message follows a message CMDCTL_USER_DATABASE_READ_ERROR
+containing a specific error), or the file was empty. Users can be added
+with the tool b10-cmdctl-usermgr.
+</p></dd><dt><a name="CMDCTL_SEND_COMMAND"></a><span class="term">CMDCTL_SEND_COMMAND sending command %1 to module %2</span></dt><dd><p>
+This debug message indicates that the given command is being sent to
+the given module.
+</p></dd><dt><a name="CMDCTL_SSL_SETUP_FAILURE_USER_DENIED"></a><span class="term">CMDCTL_SSL_SETUP_FAILURE_USER_DENIED failed to create an SSL connection (user denied): %1</span></dt><dd><p>
+The user was denied because the SSL connection could not successfully
+be set up. The specific error is given in the log message. Possible
+causes may be that the ssl request itself was bad, or the local key or
+certificate file could not be read.
+</p></dd><dt><a name="CMDCTL_STOPPED_BY_KEYBOARD"></a><span class="term">CMDCTL_STOPPED_BY_KEYBOARD keyboard interrupt, shutting down</span></dt><dd><p>
+There was a keyboard interrupt signal to stop the cmdctl daemon. The
+daemon will now shut down.
+</p></dd><dt><a name="CMDCTL_UNCAUGHT_EXCEPTION"></a><span class="term">CMDCTL_UNCAUGHT_EXCEPTION uncaught exception: %1</span></dt><dd><p>
+The b10-cmdctl daemon encountered an uncaught exception and
+will now shut down. This is indicative of a programming error and
+should not happen under normal circumstances. The exception message
+is printed.
+</p></dd><dt><a name="CMDCTL_USER_DATABASE_READ_ERROR"></a><span class="term">CMDCTL_USER_DATABASE_READ_ERROR failed to read user database file %1: %2</span></dt><dd><p>
+The b10-cmdctl daemon was unable to read the user database file. The
+file may be unreadable for the daemon, or it may be corrupted. In the
+latter case, it can be recreated with b10-cmdctl-usermgr. The specific
+error is printed in the log message.
</p></dd><dt><a name="CONFIG_CCSESSION_MSG"></a><span class="term">CONFIG_CCSESSION_MSG error in CC session message: %1</span></dt><dd><p>
There was a problem with an incoming message on the command and control
channel. The message does not appear to be a valid command, and is
@@ -65,77 +662,152 @@ missing a required element or contains an unknown data format. This
most likely means that another BIND10 module is sending a bad message.
The message itself is ignored by this module.
</p></dd><dt><a name="CONFIG_CCSESSION_MSG_INTERNAL"></a><span class="term">CONFIG_CCSESSION_MSG_INTERNAL error handling CC session message: %1</span></dt><dd><p>
-There was an internal problem handling an incoming message on the
-command and control channel. An unexpected exception was thrown. This
-most likely points to an internal inconsistency in the module code. The
-exception message is appended to the log error, and the module will
-continue to run, but will not send back an answer.
-</p></dd><dt><a name="CONFIG_FOPEN_ERR"></a><span class="term">CONFIG_FOPEN_ERR error opening %1: %2</span></dt><dd><p>
-There was an error opening the given file.
-</p></dd><dt><a name="CONFIG_JSON_PARSE"></a><span class="term">CONFIG_JSON_PARSE JSON parse error in %1: %2</span></dt><dd><p>
-There was a parse error in the JSON file. The given file does not appear
-to be in valid JSON format. Please verify that the filename is correct
-and that the contents are valid JSON.
-</p></dd><dt><a name="CONFIG_MANAGER_CONFIG"></a><span class="term">CONFIG_MANAGER_CONFIG error getting configuration from cfgmgr: %1</span></dt><dd><p>
+There was an internal problem handling an incoming message on the command
+and control channel. An unexpected exception was thrown, details of
+which are appended to the message. The module will continue to run,
+but will not send back an answer.
+</p><p>
+The most likely cause of this error is a programming error. Please raise
+a bug report.
+</p></dd><dt><a name="CONFIG_GET_FAIL"></a><span class="term">CONFIG_GET_FAIL error getting configuration from cfgmgr: %1</span></dt><dd><p>
The configuration manager returned an error when this module requested
the configuration. The full error message answer from the configuration
manager is appended to the log error. The most likely cause is that
the module is of a different (command specification) version than the
running configuration manager.
-</p></dd><dt><a name="CONFIG_MANAGER_MOD_SPEC"></a><span class="term">CONFIG_MANAGER_MOD_SPEC module specification not accepted by cfgmgr: %1</span></dt><dd><p>
-The module specification file for this module was rejected by the
-configuration manager. The full error message answer from the
-configuration manager is appended to the log error. The most likely
-cause is that the module is of a different (specification file) version
-than the running configuration manager.
-</p></dd><dt><a name="CONFIG_MODULE_SPEC"></a><span class="term">CONFIG_MODULE_SPEC module specification error in %1: %2</span></dt><dd><p>
-The given file does not appear to be a valid specification file. Please
-verify that the filename is correct and that its contents are a valid
-BIND10 module specification.
+</p></dd><dt><a name="CONFIG_GET_FAILED"></a><span class="term">CONFIG_GET_FAILED error getting configuration from cfgmgr: %1</span></dt><dd><p>
+The configuration manager returned an error response when the module
+requested its configuration. The full error message answer from the
+configuration manager is appended to the log error.
+</p></dd><dt><a name="CONFIG_JSON_PARSE"></a><span class="term">CONFIG_JSON_PARSE JSON parse error in %1: %2</span></dt><dd><p>
+There was an error parsing the JSON file. The given file does not appear
+to be in valid JSON format. Please verify that the filename is correct
+and that the contents are valid JSON.
+</p></dd><dt><a name="CONFIG_LOG_CONFIG_ERRORS"></a><span class="term">CONFIG_LOG_CONFIG_ERRORS error(s) in logging configuration: %1</span></dt><dd><p>
+There was a logging configuration update, but the internal validator
+for logging configuration found that it contained errors. The errors
+are shown, and the update is ignored.
+</p></dd><dt><a name="CONFIG_LOG_EXPLICIT"></a><span class="term">CONFIG_LOG_EXPLICIT will use logging configuration for explicitly-named logger %1</span></dt><dd><p>
+This is a debug message. When processing the "loggers" part of the
+configuration file, the configuration library found an entry for the named
+logger that matches the logger specification for the program. The logging
+configuration for the program will be updated with the information.
+</p></dd><dt><a name="CONFIG_LOG_IGNORE_EXPLICIT"></a><span class="term">CONFIG_LOG_IGNORE_EXPLICIT ignoring logging configuration for explicitly-named logger %1</span></dt><dd><p>
+This is a debug message. When processing the "loggers" part of the
+configuration file, the configuration library found an entry for the
+named logger. As this does not match the logger specification for the
+program, it has been ignored.
+</p></dd><dt><a name="CONFIG_LOG_IGNORE_WILD"></a><span class="term">CONFIG_LOG_IGNORE_WILD ignoring logging configuration for wildcard logger %1</span></dt><dd><p>
+This is a debug message. When processing the "loggers" part of the
+configuration file, the configuration library found the named wildcard
+entry (one containing the "*" character) that matched a logger already
+matched by an explicitly named entry. The configuration is ignored.
+</p></dd><dt><a name="CONFIG_LOG_WILD_MATCH"></a><span class="term">CONFIG_LOG_WILD_MATCH will use logging configuration for wildcard logger %1</span></dt><dd><p>
+This is a debug message. When processing the "loggers" part of
+the configuration file, the configuration library found the named
+wildcard entry (one containing the "*" character) that matches a logger
+specification in the program. The logging configuration for the program
+will be updated with the information.
+</p></dd><dt><a name="CONFIG_MOD_SPEC_FORMAT"></a><span class="term">CONFIG_MOD_SPEC_FORMAT module specification error in %1: %2</span></dt><dd><p>
+The given file does not appear to be a valid specification file: details
+are included in the message. Please verify that the filename is correct
+and that its contents are a valid BIND10 module specification.
+</p></dd><dt><a name="CONFIG_MOD_SPEC_REJECT"></a><span class="term">CONFIG_MOD_SPEC_REJECT module specification rejected by cfgmgr: %1</span></dt><dd><p>
+The specification file for this module was rejected by the configuration
+manager. The full error message answer from the configuration manager is
+appended to the log error. The most likely cause is that the module is of
+a different (specification file) version than the running configuration
+manager.
+</p></dd><dt><a name="CONFIG_OPEN_FAIL"></a><span class="term">CONFIG_OPEN_FAIL error opening %1: %2</span></dt><dd><p>
+There was an error opening the given file. The reason for the failure
+is included in the message.
</p></dd><dt><a name="DATASRC_CACHE_CREATE"></a><span class="term">DATASRC_CACHE_CREATE creating the hotspot cache</span></dt><dd><p>
-Debug information that the hotspot cache was created at startup.
+This is a debug message issued during startup when the hotspot cache
+is created.
</p></dd><dt><a name="DATASRC_CACHE_DESTROY"></a><span class="term">DATASRC_CACHE_DESTROY destroying the hotspot cache</span></dt><dd><p>
Debug information. The hotspot cache is being destroyed.
-</p></dd><dt><a name="DATASRC_CACHE_DISABLE"></a><span class="term">DATASRC_CACHE_DISABLE disabling the cache</span></dt><dd><p>
-The hotspot cache is disabled from now on. It is not going to store
-information or return anything.
-</p></dd><dt><a name="DATASRC_CACHE_ENABLE"></a><span class="term">DATASRC_CACHE_ENABLE enabling the cache</span></dt><dd><p>
-The hotspot cache is enabled from now on.
-</p></dd><dt><a name="DATASRC_CACHE_EXPIRED"></a><span class="term">DATASRC_CACHE_EXPIRED the item '%1' is expired</span></dt><dd><p>
-Debug information. There was an attempt to look up an item in the hotspot
-cache. And the item was actually there, but it was too old, so it was removed
-instead and nothing is reported (the external behaviour is the same as with
-CACHE_NOT_FOUND).
+</p></dd><dt><a name="DATASRC_CACHE_DISABLE"></a><span class="term">DATASRC_CACHE_DISABLE disabling the hotspot cache</span></dt><dd><p>
+A debug message issued when the hotspot cache is disabled.
+</p></dd><dt><a name="DATASRC_CACHE_ENABLE"></a><span class="term">DATASRC_CACHE_ENABLE enabling the hotspot cache</span></dt><dd><p>
+A debug message issued when the hotspot cache is enabled.
+</p></dd><dt><a name="DATASRC_CACHE_EXPIRED"></a><span class="term">DATASRC_CACHE_EXPIRED item '%1' in the hotspot cache has expired</span></dt><dd><p>
+A debug message issued when a hotspot cache lookup located the item but it
+had expired. The item was removed and the program proceeded as if the item
+had not been found.
</p></dd><dt><a name="DATASRC_CACHE_FOUND"></a><span class="term">DATASRC_CACHE_FOUND the item '%1' was found</span></dt><dd><p>
-Debug information. An item was successfully looked up in the hotspot cache.
-</p></dd><dt><a name="DATASRC_CACHE_FULL"></a><span class="term">DATASRC_CACHE_FULL cache is full, dropping oldest</span></dt><dd><p>
+Debug information. An item was successfully located in the hotspot cache.
+</p></dd><dt><a name="DATASRC_CACHE_FULL"></a><span class="term">DATASRC_CACHE_FULL hotspot cache is full, dropping oldest</span></dt><dd><p>
Debug information. After inserting an item into the hotspot cache, the
maximum number of items was exceeded, so the least recently used item will
be dropped. This should be directly followed by CACHE_REMOVE.
-</p></dd><dt><a name="DATASRC_CACHE_INSERT"></a><span class="term">DATASRC_CACHE_INSERT inserting item '%1' into the cache</span></dt><dd><p>
-Debug information. It means a new item is being inserted into the hotspot
+</p></dd><dt><a name="DATASRC_CACHE_INSERT"></a><span class="term">DATASRC_CACHE_INSERT inserting item '%1' into the hotspot cache</span></dt><dd><p>
+A debug message indicating that a new item is being inserted into the hotspot
cache.
-</p></dd><dt><a name="DATASRC_CACHE_NOT_FOUND"></a><span class="term">DATASRC_CACHE_NOT_FOUND the item '%1' was not found</span></dt><dd><p>
-Debug information. It was attempted to look up an item in the hotspot cache,
-but it is not there.
-</p></dd><dt><a name="DATASRC_CACHE_OLD_FOUND"></a><span class="term">DATASRC_CACHE_OLD_FOUND older instance of cache item found, replacing</span></dt><dd><p>
+</p></dd><dt><a name="DATASRC_CACHE_NOT_FOUND"></a><span class="term">DATASRC_CACHE_NOT_FOUND the item '%1' was not found in the hotspot cache</span></dt><dd><p>
+A debug message issued when hotspot cache was searched for the specified
+item but it was not found.
+</p></dd><dt><a name="DATASRC_CACHE_OLD_FOUND"></a><span class="term">DATASRC_CACHE_OLD_FOUND older instance of hotspot cache item '%1' found, replacing</span></dt><dd><p>
Debug information. While inserting an item into the hotspot cache, an older
-instance of an item with the same name was found. The old instance will be
-removed. This should be directly followed by CACHE_REMOVE.
-</p></dd><dt><a name="DATASRC_CACHE_REMOVE"></a><span class="term">DATASRC_CACHE_REMOVE removing '%1' from the cache</span></dt><dd><p>
+instance of an item with the same name was found; the old instance will be
+removed. This will be directly followed by CACHE_REMOVE.
+</p></dd><dt><a name="DATASRC_CACHE_REMOVE"></a><span class="term">DATASRC_CACHE_REMOVE removing '%1' from the hotspot cache</span></dt><dd><p>
Debug information. An item is being removed from the hotspot cache.
-</p></dd><dt><a name="DATASRC_CACHE_SLOTS"></a><span class="term">DATASRC_CACHE_SLOTS setting the cache size to '%1', dropping '%2' items</span></dt><dd><p>
+</p></dd><dt><a name="DATASRC_CACHE_SLOTS"></a><span class="term">DATASRC_CACHE_SLOTS setting the hotspot cache size to '%1', dropping '%2' items</span></dt><dd><p>
The maximum allowed number of items of the hotspot cache is set to the given
number. If there are too many, some of them will be dropped. The size of 0
means no limit.
+</p></dd><dt><a name="DATASRC_DATABASE_FIND_ERROR"></a><span class="term">DATASRC_DATABASE_FIND_ERROR error retrieving data from datasource %1: %2</span></dt><dd><p>
+This was an internal error while reading data from a datasource. This can either
+mean the specific data source implementation is not behaving correctly, or the
+data it provides is invalid. The current search is aborted.
+The error message contains specific information about the error.
+</p></dd><dt><a name="DATASRC_DATABASE_FIND_RECORDS"></a><span class="term">DATASRC_DATABASE_FIND_RECORDS looking in datasource %1 for record %2/%3</span></dt><dd><p>
+Debug information. The database data source is looking up records with the given
+name and type in the database.
+</p></dd><dt><a name="DATASRC_DATABASE_FIND_TTL_MISMATCH"></a><span class="term">DATASRC_DATABASE_FIND_TTL_MISMATCH TTL values differ in %1 for elements of %2/%3/%4, setting to %5</span></dt><dd><p>
+The datasource backend provided resource records for the given RRset with
+different TTL values. The TTL of the RRset is set to the lowest value, which
+is printed in the log message.
+</p></dd><dt><a name="DATASRC_DATABASE_FIND_UNCAUGHT_ERROR"></a><span class="term">DATASRC_DATABASE_FIND_UNCAUGHT_ERROR uncaught general error retrieving data from datasource %1: %2</span></dt><dd><p>
+There was an uncaught general exception while reading data from a datasource.
+This most likely points to a logic error in the code, and can be considered a
+bug. The current search is aborted. Specific information about the exception is
+printed in this error message.
+</p></dd><dt><a name="DATASRC_DATABASE_FIND_UNCAUGHT_ISC_ERROR"></a><span class="term">DATASRC_DATABASE_FIND_UNCAUGHT_ISC_ERROR uncaught error retrieving data from datasource %1: %2</span></dt><dd><p>
+There was an uncaught ISC exception while reading data from a datasource. This
+most likely points to a logic error in the code, and can be considered a bug.
+The current search is aborted. Specific information about the exception is
+printed in this error message.
+</p></dd><dt><a name="DATASRC_DATABASE_FOUND_DELEGATION"></a><span class="term">DATASRC_DATABASE_FOUND_DELEGATION Found delegation at %2 in %1</span></dt><dd><p>
+When searching for a domain, the program met a delegation to a different zone
+at the given domain name. It will return that one instead.
+</p></dd><dt><a name="DATASRC_DATABASE_FOUND_DELEGATION_EXACT"></a><span class="term">DATASRC_DATABASE_FOUND_DELEGATION_EXACT Found delegation at %2 (exact match) in %1</span></dt><dd><p>
+The program found the domain requested, but it is a delegation point to a
+different zone, therefore it is not authoritative for this domain name.
+It will return the NS record instead.
+</p></dd><dt><a name="DATASRC_DATABASE_FOUND_DNAME"></a><span class="term">DATASRC_DATABASE_FOUND_DNAME Found DNAME at %2 in %1</span></dt><dd><p>
+When searching for a domain, the program met a DNAME redirection to a different
+place in the domain space at the given domain name. It will return that one
+instead.
+</p></dd><dt><a name="DATASRC_DATABASE_FOUND_NXDOMAIN"></a><span class="term">DATASRC_DATABASE_FOUND_NXDOMAIN search in datasource %1 resulted in NXDOMAIN for %2/%3/%4</span></dt><dd><p>
+The data returned by the database backend did not contain any data for the given
+domain name, class and type.
+</p></dd><dt><a name="DATASRC_DATABASE_FOUND_NXRRSET"></a><span class="term">DATASRC_DATABASE_FOUND_NXRRSET search in datasource %1 resulted in NXRRSET for %2/%3/%4</span></dt><dd><p>
+The data returned by the database backend contained data for the given domain
+name and class, but not for the given type.
+</p></dd><dt><a name="DATASRC_DATABASE_FOUND_RRSET"></a><span class="term">DATASRC_DATABASE_FOUND_RRSET search in datasource %1 resulted in RRset %2</span></dt><dd><p>
+The data returned by the database backend contained data for the given domain
+name, and it either matches the type or has a relevant type. The RRset that is
+returned is printed.
</p></dd><dt><a name="DATASRC_DO_QUERY"></a><span class="term">DATASRC_DO_QUERY handling query for '%1/%2'</span></dt><dd><p>
-Debug information. We're processing some internal query for given name and
-type.
+A debug message indicating that a query for the given name and RR type is being
+processed.
</p></dd><dt><a name="DATASRC_MEM_ADD_RRSET"></a><span class="term">DATASRC_MEM_ADD_RRSET adding RRset '%1/%2' into zone '%3'</span></dt><dd><p>
Debug information. An RRset is being added to the in-memory data source.
</p></dd><dt><a name="DATASRC_MEM_ADD_WILDCARD"></a><span class="term">DATASRC_MEM_ADD_WILDCARD adding wildcards for '%1'</span></dt><dd><p>
-Debug information. Some special marks above each * in wildcard name are needed.
-They are being added now for this name.
+This is a debug message issued during the processing of a wildcard
+name. The internal domain name tree is scanned and some nodes are
+specially marked to allow the wildcard lookup to succeed.
</p></dd><dt><a name="DATASRC_MEM_ADD_ZONE"></a><span class="term">DATASRC_MEM_ADD_ZONE adding zone '%1/%2'</span></dt><dd><p>
Debug information. A zone is being added into the in-memory data source.
</p></dd><dt><a name="DATASRC_MEM_ANY_SUCCESS"></a><span class="term">DATASRC_MEM_ANY_SUCCESS ANY query for '%1' successful</span></dt><dd><p>
@@ -146,7 +818,7 @@ Debug information. The requested domain is an alias to a different domain,
returning the CNAME instead.
</p></dd><dt><a name="DATASRC_MEM_CNAME_COEXIST"></a><span class="term">DATASRC_MEM_CNAME_COEXIST can't add data to CNAME in domain '%1'</span></dt><dd><p>
This is the same problem as in MEM_CNAME_TO_NONEMPTY, but it happened the
-other way around -- adding some outher data to CNAME.
+other way around -- adding some other data to CNAME.
</p></dd><dt><a name="DATASRC_MEM_CNAME_TO_NONEMPTY"></a><span class="term">DATASRC_MEM_CNAME_TO_NONEMPTY can't add CNAME to domain with other data in '%1'</span></dt><dd><p>
Someone or something tried to add a CNAME into a domain that already contains
some other data. But the protocol forbids coexistence of CNAME with anything
@@ -164,10 +836,10 @@ encountered on the way. This may lead to redirection to a different domain and
stop the search.
</p></dd><dt><a name="DATASRC_MEM_DNAME_FOUND"></a><span class="term">DATASRC_MEM_DNAME_FOUND DNAME found at '%1'</span></dt><dd><p>
Debug information. A DNAME was found instead of the requested information.
-</p></dd><dt><a name="DATASRC_MEM_DNAME_NS"></a><span class="term">DATASRC_MEM_DNAME_NS dNAME and NS can't coexist in non-apex domain '%1'</span></dt><dd><p>
-It was requested for DNAME and NS records to be put into the same domain
-which is not the apex (the top of the zone). This is forbidden by RFC
-2672, section 3. This indicates a problem with provided data.
+</p></dd><dt><a name="DATASRC_MEM_DNAME_NS"></a><span class="term">DATASRC_MEM_DNAME_NS DNAME and NS can't coexist in non-apex domain '%1'</span></dt><dd><p>
+A request was made for DNAME and NS records to be put into the same
+domain which is not the apex (the top of the zone). This is forbidden
+by RFC 2672 (section 3) and indicates a problem with provided data.
</p></dd><dt><a name="DATASRC_MEM_DOMAIN_EMPTY"></a><span class="term">DATASRC_MEM_DOMAIN_EMPTY requested domain '%1' is empty</span></dt><dd><p>
Debug information. The requested domain exists in the tree of domains, but
it is empty. Therefore it doesn't contain the requested resource type.
@@ -186,7 +858,7 @@ Debug information. A zone object for this zone is being searched for in the
in-memory data source.
</p></dd><dt><a name="DATASRC_MEM_LOAD"></a><span class="term">DATASRC_MEM_LOAD loading zone '%1' from file '%2'</span></dt><dd><p>
Debug information. The content of master file is being loaded into the memory.
-</p></dd><dt><a name="DATASRC_MEM_NOTFOUND"></a><span class="term">DATASRC_MEM_NOTFOUND requested domain '%1' not found</span></dt><dd><p>
+</p></dd><dt><a name="DATASRC_MEM_NOT_FOUND"></a><span class="term">DATASRC_MEM_NOT_FOUND requested domain '%1' not found</span></dt><dd><p>
Debug information. The requested domain does not exist.
</p></dd><dt><a name="DATASRC_MEM_NS_ENCOUNTERED"></a><span class="term">DATASRC_MEM_NS_ENCOUNTERED encountered a NS</span></dt><dd><p>
Debug information. While searching for the requested domain, a NS was
@@ -222,21 +894,21 @@ destroyed.
Debug information. A domain above wildcard was reached, but there's something
below the requested domain. Therefore the wildcard doesn't apply here. This
behaviour is specified by RFC 1034, section 4.3.3
-</p></dd><dt><a name="DATASRC_MEM_WILDCARD_DNAME"></a><span class="term">DATASRC_MEM_WILDCARD_DNAME dNAME record in wildcard domain '%1'</span></dt><dd><p>
+</p></dd><dt><a name="DATASRC_MEM_WILDCARD_DNAME"></a><span class="term">DATASRC_MEM_WILDCARD_DNAME DNAME record in wildcard domain '%1'</span></dt><dd><p>
The software refuses to load DNAME records into a wildcard domain. It isn't
explicitly forbidden, but the protocol is ambiguous about how this should
behave and BIND 9 refuses that as well. Please describe your intention using
different tools.
-</p></dd><dt><a name="DATASRC_MEM_WILDCARD_NS"></a><span class="term">DATASRC_MEM_WILDCARD_NS nS record in wildcard domain '%1'</span></dt><dd><p>
+</p></dd><dt><a name="DATASRC_MEM_WILDCARD_NS"></a><span class="term">DATASRC_MEM_WILDCARD_NS NS record in wildcard domain '%1'</span></dt><dd><p>
The software refuses to load NS records into a wildcard domain. It isn't
explicitly forbidden, but the protocol is ambiguous about how this should
behave and BIND 9 refuses that as well. Please describe your intention using
different tools.
</p></dd><dt><a name="DATASRC_META_ADD"></a><span class="term">DATASRC_META_ADD adding a data source into meta data source</span></dt><dd><p>
-Debug information. Yet another data source is being added into the meta data
-source. (probably at startup or reconfiguration)
+This is a debug message issued during startup or reconfiguration.
+Another data source is being added into the meta data source.
</p></dd><dt><a name="DATASRC_META_ADD_CLASS_MISMATCH"></a><span class="term">DATASRC_META_ADD_CLASS_MISMATCH mismatch between classes '%1' and '%2'</span></dt><dd><p>
-It was attempted to add a data source into a meta data source. But their
+It was attempted to add a data source into a meta data source, but their
classes do not match.
</p></dd><dt><a name="DATASRC_META_REMOVE"></a><span class="term">DATASRC_META_REMOVE removing data source from meta data source</span></dt><dd><p>
Debug information. A data source is being removed from meta data source.
@@ -257,10 +929,10 @@ specific error already.
</p></dd><dt><a name="DATASRC_QUERY_BAD_REFERRAL"></a><span class="term">DATASRC_QUERY_BAD_REFERRAL bad referral to '%1'</span></dt><dd><p>
The domain lives in another zone. But it is not possible to generate referral
information for it.
-</p></dd><dt><a name="DATASRC_QUERY_CACHED"></a><span class="term">DATASRC_QUERY_CACHED data for %1/%2 found in cache</span></dt><dd><p>
+</p></dd><dt><a name="DATASRC_QUERY_CACHED"></a><span class="term">DATASRC_QUERY_CACHED data for %1/%2 found in hotspot cache</span></dt><dd><p>
Debug information. The requested data were found in the hotspot cache, so
no query is sent to the real data source.
-</p></dd><dt><a name="DATASRC_QUERY_CHECK_CACHE"></a><span class="term">DATASRC_QUERY_CHECK_CACHE checking cache for '%1/%2'</span></dt><dd><p>
+</p></dd><dt><a name="DATASRC_QUERY_CHECK_CACHE"></a><span class="term">DATASRC_QUERY_CHECK_CACHE checking hotspot cache for '%1/%2'</span></dt><dd><p>
Debug information. While processing a query, lookup to the hotspot cache
is being made.
</p></dd><dt><a name="DATASRC_QUERY_COPY_AUTH"></a><span class="term">DATASRC_QUERY_COPY_AUTH copying authoritative section into message</span></dt><dd><p>
@@ -269,20 +941,19 @@ response message.
</p></dd><dt><a name="DATASRC_QUERY_DELEGATION"></a><span class="term">DATASRC_QUERY_DELEGATION looking for delegation on the path to '%1'</span></dt><dd><p>
Debug information. The software is trying to identify delegation points on the
way down to the given domain.
-</p></dd><dt><a name="DATASRC_QUERY_EMPTY_CNAME"></a><span class="term">DATASRC_QUERY_EMPTY_CNAME cNAME at '%1' is empty</span></dt><dd><p>
-There was an CNAME and it was being followed. But it contains no records,
-so there's nowhere to go. There will be no answer. This indicates a problem
-with supplied data.
-We tried to follow
+</p></dd><dt><a name="DATASRC_QUERY_EMPTY_CNAME"></a><span class="term">DATASRC_QUERY_EMPTY_CNAME CNAME at '%1' is empty</span></dt><dd><p>
+A CNAME chain was being followed and an entry was found that pointed
+to a domain name that had no RRsets associated with it. As a result,
+the query cannot be answered. This indicates a problem with supplied data.
</p></dd><dt><a name="DATASRC_QUERY_EMPTY_DNAME"></a><span class="term">DATASRC_QUERY_EMPTY_DNAME the DNAME on '%1' is empty</span></dt><dd><p>
During an attempt to synthesize CNAME from this DNAME it was discovered the
DNAME is empty (it has no records). This indicates problem with supplied data.
</p></dd><dt><a name="DATASRC_QUERY_FAIL"></a><span class="term">DATASRC_QUERY_FAIL query failed</span></dt><dd><p>
Some subtask of query processing failed. The reason should have been reported
-already. We are returning SERVFAIL.
+already and a SERVFAIL will be returned to the querying system.
</p></dd><dt><a name="DATASRC_QUERY_FOLLOW_CNAME"></a><span class="term">DATASRC_QUERY_FOLLOW_CNAME following CNAME at '%1'</span></dt><dd><p>
-Debug information. The domain is a CNAME (or a DNAME and we created a CNAME
-for it already), so it's being followed.
+Debug information. The domain is a CNAME (or a DNAME and a CNAME for it
+has already been created) and the search is following this chain.
</p></dd><dt><a name="DATASRC_QUERY_GET_MX_ADDITIONAL"></a><span class="term">DATASRC_QUERY_GET_MX_ADDITIONAL addition of A/AAAA for '%1' requested by MX '%2'</span></dt><dd><p>
Debug information. While processing a query, a MX record was met. It
references the mentioned address, so A/AAAA records for it are looked up
@@ -301,12 +972,12 @@ operation code.
</p></dd><dt><a name="DATASRC_QUERY_IS_AUTH"></a><span class="term">DATASRC_QUERY_IS_AUTH auth query (%1/%2)</span></dt><dd><p>
Debug information. The last DO_QUERY is an auth query.
</p></dd><dt><a name="DATASRC_QUERY_IS_GLUE"></a><span class="term">DATASRC_QUERY_IS_GLUE glue query (%1/%2)</span></dt><dd><p>
-Debug information. The last DO_QUERY is query for glue addresses.
+Debug information. The last DO_QUERY is a query for glue addresses.
</p></dd><dt><a name="DATASRC_QUERY_IS_NOGLUE"></a><span class="term">DATASRC_QUERY_IS_NOGLUE query for non-glue addresses (%1/%2)</span></dt><dd><p>
-Debug information. The last DO_QUERY is query for addresses that are not
+Debug information. The last DO_QUERY is a query for addresses that are not
glue.
</p></dd><dt><a name="DATASRC_QUERY_IS_REF"></a><span class="term">DATASRC_QUERY_IS_REF query for referral (%1/%2)</span></dt><dd><p>
-Debug information. The last DO_QUERY is query for referral information.
+Debug information. The last DO_QUERY is a query for referral information.
</p></dd><dt><a name="DATASRC_QUERY_IS_SIMPLE"></a><span class="term">DATASRC_QUERY_IS_SIMPLE simple query (%1/%2)</span></dt><dd><p>
Debug information. The last DO_QUERY is a simple query.
</p></dd><dt><a name="DATASRC_QUERY_MISPLACED_TASK"></a><span class="term">DATASRC_QUERY_MISPLACED_TASK task of this type should not be here</span></dt><dd><p>
@@ -324,10 +995,10 @@ does not have one. This indicates problem with provided data.
The underlying data source failed to answer the no-glue query. 1 means some
error, 2 is not implemented. The data source should have logged the specific
error already.
-</p></dd><dt><a name="DATASRC_QUERY_NO_CACHE_ANY_AUTH"></a><span class="term">DATASRC_QUERY_NO_CACHE_ANY_AUTH ignoring cache for ANY query (%1/%2 in %3 class)</span></dt><dd><p>
+</p></dd><dt><a name="DATASRC_QUERY_NO_CACHE_ANY_AUTH"></a><span class="term">DATASRC_QUERY_NO_CACHE_ANY_AUTH ignoring hotspot cache for ANY query (%1/%2 in %3 class)</span></dt><dd><p>
Debug information. The hotspot cache is ignored for authoritative ANY queries
for consistency reasons.
-</p></dd><dt><a name="DATASRC_QUERY_NO_CACHE_ANY_SIMPLE"></a><span class="term">DATASRC_QUERY_NO_CACHE_ANY_SIMPLE ignoring cache for ANY query (%1/%2 in %3 class)</span></dt><dd><p>
+</p></dd><dt><a name="DATASRC_QUERY_NO_CACHE_ANY_SIMPLE"></a><span class="term">DATASRC_QUERY_NO_CACHE_ANY_SIMPLE ignoring hotspot cache for ANY query (%1/%2 in %3 class)</span></dt><dd><p>
Debug information. The hotspot cache is ignored for ANY queries for consistency
reasons.
</p></dd><dt><a name="DATASRC_QUERY_NO_DS_NSEC"></a><span class="term">DATASRC_QUERY_NO_DS_NSEC there's no DS record in the '%1' zone</span></dt><dd><p>
@@ -341,7 +1012,7 @@ Lookup of domain failed because the data have no zone that contain the
domain. Maybe someone sent a query to the wrong server for some reason.
</p></dd><dt><a name="DATASRC_QUERY_PROCESS"></a><span class="term">DATASRC_QUERY_PROCESS processing query '%1/%2' in the '%3' class</span></dt><dd><p>
Debug information. A sure query is being processed now.
-</p></dd><dt><a name="DATASRC_QUERY_PROVENX_FAIL"></a><span class="term">DATASRC_QUERY_PROVENX_FAIL unable to prove nonexistence of '%1'</span></dt><dd><p>
+</p></dd><dt><a name="DATASRC_QUERY_PROVE_NX_FAIL"></a><span class="term">DATASRC_QUERY_PROVE_NX_FAIL unable to prove nonexistence of '%1'</span></dt><dd><p>
The user wants DNSSEC and we discovered the entity doesn't exist (either
domain or the record). But there was an error getting NSEC/NSEC3 record
to prove the nonexistence.
@@ -357,13 +1028,13 @@ The underlying data source failed to answer the simple query. 1 means some
error, 2 is not implemented. The data source should have logged the specific
error already.
</p></dd><dt><a name="DATASRC_QUERY_SYNTH_CNAME"></a><span class="term">DATASRC_QUERY_SYNTH_CNAME synthesizing CNAME from DNAME on '%1'</span></dt><dd><p>
-Debug information. While answering a query, a DNAME was met. The DNAME itself
-will be returned, but along with it a CNAME for clients which don't understand
-DNAMEs will be synthesized.
+This is a debug message. While answering a query, a DNAME was encountered. The
+DNAME itself will be returned, along with a synthesized CNAME for clients that
+do not understand the DNAME RR.
</p></dd><dt><a name="DATASRC_QUERY_TASK_FAIL"></a><span class="term">DATASRC_QUERY_TASK_FAIL task failed with %1</span></dt><dd><p>
The query subtask failed. The reason should have been reported by the subtask
already. The code is 1 for error, 2 for not implemented.
-</p></dd><dt><a name="DATASRC_QUERY_TOO_MANY_CNAMES"></a><span class="term">DATASRC_QUERY_TOO_MANY_CNAMES cNAME chain limit exceeded at '%1'</span></dt><dd><p>
+</p></dd><dt><a name="DATASRC_QUERY_TOO_MANY_CNAMES"></a><span class="term">DATASRC_QUERY_TOO_MANY_CNAMES CNAME chain limit exceeded at '%1'</span></dt><dd><p>
A CNAME led to another CNAME and it led to another, and so on. After 16
CNAMEs, the software gave up. Long CNAME chains are discouraged, and this
might possibly be a loop as well. Note that some of the CNAMEs might have
@@ -377,7 +1048,7 @@ domain is being looked for now.
</p></dd><dt><a name="DATASRC_QUERY_WILDCARD_FAIL"></a><span class="term">DATASRC_QUERY_WILDCARD_FAIL error processing wildcard for '%1'</span></dt><dd><p>
During an attempt to cover the domain by a wildcard an error happened. The
exact kind was hopefully already reported.
-</p></dd><dt><a name="DATASRC_QUERY_WILDCARD_PROVENX_FAIL"></a><span class="term">DATASRC_QUERY_WILDCARD_PROVENX_FAIL unable to prove nonexistence of '%1' (%2)</span></dt><dd><p>
+</p></dd><dt><a name="DATASRC_QUERY_WILDCARD_PROVE_NX_FAIL"></a><span class="term">DATASRC_QUERY_WILDCARD_PROVE_NX_FAIL unable to prove nonexistence of '%1' (%2)</span></dt><dd><p>
While processing a wildcard, it wasn't possible to prove nonexistence of the
given domain or record. The code is 1 for error and 2 for not implemented.
</p></dd><dt><a name="DATASRC_QUERY_WILDCARD_REFERRAL"></a><span class="term">DATASRC_QUERY_WILDCARD_REFERRAL unable to find referral info for '%1' (%2)</span></dt><dd><p>
@@ -385,15 +1056,21 @@ While processing a wildcard, a referral was met. But it wasn't possible to get
enough information for it. The code is 1 for error, 2 for not implemented.
</p></dd><dt><a name="DATASRC_SQLITE_CLOSE"></a><span class="term">DATASRC_SQLITE_CLOSE closing SQLite database</span></dt><dd><p>
Debug information. The SQLite data source is closing the database file.
-</p></dd><dt><a name="DATASRC_SQLITE_CREATE"></a><span class="term">DATASRC_SQLITE_CREATE sQLite data source created</span></dt><dd><p>
+</p></dd><dt><a name="DATASRC_SQLITE_CONNCLOSE"></a><span class="term">DATASRC_SQLITE_CONNCLOSE Closing sqlite database</span></dt><dd><p>
+The database file is no longer needed and is being closed.
+</p></dd><dt><a name="DATASRC_SQLITE_CONNOPEN"></a><span class="term">DATASRC_SQLITE_CONNOPEN Opening sqlite database file '%1'</span></dt><dd><p>
+The database file is being opened so it can start providing data.
+</p></dd><dt><a name="DATASRC_SQLITE_CREATE"></a><span class="term">DATASRC_SQLITE_CREATE SQLite data source created</span></dt><dd><p>
Debug information. An instance of SQLite data source is being created.
-</p></dd><dt><a name="DATASRC_SQLITE_DESTROY"></a><span class="term">DATASRC_SQLITE_DESTROY sQLite data source destroyed</span></dt><dd><p>
+</p></dd><dt><a name="DATASRC_SQLITE_DESTROY"></a><span class="term">DATASRC_SQLITE_DESTROY SQLite data source destroyed</span></dt><dd><p>
Debug information. An instance of SQLite data source is being destroyed.
+</p></dd><dt><a name="DATASRC_SQLITE_DROPCONN"></a><span class="term">DATASRC_SQLITE_DROPCONN SQLite3Database is being deinitialized</span></dt><dd><p>
+The object around a database connection is being destroyed.
</p></dd><dt><a name="DATASRC_SQLITE_ENCLOSURE"></a><span class="term">DATASRC_SQLITE_ENCLOSURE looking for zone containing '%1'</span></dt><dd><p>
-Debug information. The SQLite data source is trying to identify, which zone
+Debug information. The SQLite data source is trying to identify which zone
should hold this domain.
-</p></dd><dt><a name="DATASRC_SQLITE_ENCLOSURE_NOTFOUND"></a><span class="term">DATASRC_SQLITE_ENCLOSURE_NOTFOUND no zone contains it</span></dt><dd><p>
-Debug information. The last SQLITE_ENCLOSURE query was unsuccessful, there's
+</p></dd><dt><a name="DATASRC_SQLITE_ENCLOSURE_NOT_FOUND"></a><span class="term">DATASRC_SQLITE_ENCLOSURE_NOT_FOUND no zone contains '%1'</span></dt><dd><p>
+Debug information. The last SQLITE_ENCLOSURE query was unsuccessful; there's
no such zone in our data.
</p></dd><dt><a name="DATASRC_SQLITE_FIND"></a><span class="term">DATASRC_SQLITE_FIND looking for RRset '%1/%2'</span></dt><dd><p>
Debug information. The SQLite data source is looking up a resource record
@@ -417,7 +1094,7 @@ and type in the database.
Debug information. The SQLite data source is identifying if this domain is
a referral and where it goes.
</p></dd><dt><a name="DATASRC_SQLITE_FINDREF_BAD_CLASS"></a><span class="term">DATASRC_SQLITE_FINDREF_BAD_CLASS class mismatch looking for referral ('%1' and '%2')</span></dt><dd><p>
-The SQLite data source was trying to identify, if there's a referral. But
+The SQLite data source was trying to identify if there's a referral. But
it contains a different class than the query was for.
</p></dd><dt><a name="DATASRC_SQLITE_FIND_BAD_CLASS"></a><span class="term">DATASRC_SQLITE_FIND_BAD_CLASS class mismatch looking for an RRset ('%1' and '%2')</span></dt><dd><p>
The SQLite data source was looking up an RRset, but the data source contains
@@ -428,21 +1105,30 @@ source.
</p></dd><dt><a name="DATASRC_SQLITE_FIND_NSEC3_NO_ZONE"></a><span class="term">DATASRC_SQLITE_FIND_NSEC3_NO_ZONE no such zone '%1'</span></dt><dd><p>
The SQLite data source was asked to provide a NSEC3 record for given zone.
But it doesn't contain that zone.
+</p></dd><dt><a name="DATASRC_SQLITE_NEWCONN"></a><span class="term">DATASRC_SQLITE_NEWCONN SQLite3Database is being initialized</span></dt><dd><p>
+A wrapper object to hold database connection is being initialized.
</p></dd><dt><a name="DATASRC_SQLITE_OPEN"></a><span class="term">DATASRC_SQLITE_OPEN opening SQLite database '%1'</span></dt><dd><p>
Debug information. The SQLite data source is loading an SQLite database in
the provided file.
</p></dd><dt><a name="DATASRC_SQLITE_PREVIOUS"></a><span class="term">DATASRC_SQLITE_PREVIOUS looking for name previous to '%1'</span></dt><dd><p>
-Debug information. We're trying to look up name preceding the supplied one.
+This is a debug message. The name given was not found, so the program
+is searching for the next name higher up the hierarchy (e.g. if
+www.example.com were queried for and not found, the software searches
+for the "previous" name, example.com).
</p></dd><dt><a name="DATASRC_SQLITE_PREVIOUS_NO_ZONE"></a><span class="term">DATASRC_SQLITE_PREVIOUS_NO_ZONE no zone containing '%1'</span></dt><dd><p>
-The SQLite data source tried to identify name preceding this one. But this
-one is not contained in any zone in the data source.
+The name given was not found, so the program is searching for the next
+name higher up the hierarchy (e.g. if www.example.com were queried
+for and not found, the software searches for the "previous" name,
+example.com). However, this name is not contained in any zone in the
+data source. This is an error since it indicates a problem in the earlier
+processing of the query.
</p></dd><dt><a name="DATASRC_SQLITE_SETUP"></a><span class="term">DATASRC_SQLITE_SETUP setting up SQLite database</span></dt><dd><p>
The database for SQLite data source was found empty. It is assumed this is the
first run and it is being initialized with current schema. It'll still contain
no data, but it will be ready for use.
-</p></dd><dt><a name="DATASRC_STATIC_BAD_CLASS"></a><span class="term">DATASRC_STATIC_BAD_CLASS static data source can handle CH only</span></dt><dd><p>
-For some reason, someone asked the static data source a query that is not in
-the CH class.
+</p></dd><dt><a name="DATASRC_STATIC_CLASS_NOT_CH"></a><span class="term">DATASRC_STATIC_CLASS_NOT_CH static data source can handle CH class only</span></dt><dd><p>
+An error message indicating that a query requesting a RR for a class other
+than CH was sent to the static data source (which only handles CH queries).
</p></dd><dt><a name="DATASRC_STATIC_CREATE"></a><span class="term">DATASRC_STATIC_CREATE creating the static datasource</span></dt><dd><p>
Debug information. The static data source (the one holding stuff like
version.bind) is being created.
@@ -452,142 +1138,229 @@ data source.
</p></dd><dt><a name="DATASRC_UNEXPECTED_QUERY_STATE"></a><span class="term">DATASRC_UNEXPECTED_QUERY_STATE unexpected query state</span></dt><dd><p>
This indicates a programming error. An internal task of unknown type was
generated.
-</p></dd><dt><a name="LOGIMPL_ABOVEDBGMAX"></a><span class="term">LOGIMPL_ABOVEDBGMAX debug level of %1 is too high and will be set to the maximum of %2</span></dt><dd><p>
-A message from the underlying logger implementation code, the debug level
-(as set by the string DEBGUGn) is above the maximum allowed value and has
-been reduced to that value.
-</p></dd><dt><a name="LOGIMPL_BADDEBUG"></a><span class="term">LOGIMPL_BADDEBUG debug string is '%1': must be of the form DEBUGn</span></dt><dd><p>
-The string indicating the extended logging level (used by the underlying
-logger implementation code) is not of the stated form. In particular,
-it starts DEBUG but does not end with an integer.
-</p></dd><dt><a name="LOGIMPL_BELOWDBGMIN"></a><span class="term">LOGIMPL_BELOWDBGMIN debug level of %1 is too low and will be set to the minimum of %2</span></dt><dd><p>
-A message from the underlying logger implementation code, the debug level
-(as set by the string DEBGUGn) is below the minimum allowed value and has
-been increased to that value.
-</p></dd><dt><a name="MSG_BADDESTINATION"></a><span class="term">MSG_BADDESTINATION unrecognized log destination: %1</span></dt><dd><p>
+</p></dd><dt><a name="LOGIMPL_ABOVE_MAX_DEBUG"></a><span class="term">LOGIMPL_ABOVE_MAX_DEBUG debug level of %1 is too high and will be set to the maximum of %2</span></dt><dd><p>
+A message from the interface to the underlying logger implementation reporting
+that the debug level (as set by an internally-created string DEBUGn, where n
+is an integer, e.g. DEBUG22) is above the maximum allowed value and has
+been reduced to that value. The appearance of this message may indicate
+a programming error - please submit a bug report.
+</p></dd><dt><a name="LOGIMPL_BAD_DEBUG_STRING"></a><span class="term">LOGIMPL_BAD_DEBUG_STRING debug string '%1' has invalid format</span></dt><dd><p>
+A message from the interface to the underlying logger implementation
+reporting that an internally-created string used to set the debug level
+is not of the correct format (it should be of the form DEBUGn, where n
+is an integer, e.g. DEBUG22). The appearance of this message indicates
+a programming error - please submit a bug report.
+</p></dd><dt><a name="LOGIMPL_BELOW_MIN_DEBUG"></a><span class="term">LOGIMPL_BELOW_MIN_DEBUG debug level of %1 is too low and will be set to the minimum of %2</span></dt><dd><p>
+A message from the interface to the underlying logger implementation reporting
+that the debug level (as set by an internally-created string DEBUGn, where n
+is an integer, e.g. DEBUG22) is below the minimum allowed value and has
+been increased to that value. The appearance of this message may indicate
+a programming error - please submit a bug report.
+</p></dd><dt><a name="LOG_BAD_DESTINATION"></a><span class="term">LOG_BAD_DESTINATION unrecognized log destination: %1</span></dt><dd><p>
A logger destination value was given that was not recognized. The
destination should be one of "console", "file", or "syslog".
-</p></dd><dt><a name="MSG_BADSEVERITY"></a><span class="term">MSG_BADSEVERITY unrecognized log severity: %1</span></dt><dd><p>
+</p></dd><dt><a name="LOG_BAD_SEVERITY"></a><span class="term">LOG_BAD_SEVERITY unrecognized log severity: %1</span></dt><dd><p>
A logger severity value was given that was not recognized. The severity
-should be one of "DEBUG", "INFO", "WARN", "ERROR", or "FATAL".
-</p></dd><dt><a name="MSG_BADSTREAM"></a><span class="term">MSG_BADSTREAM bad log console output stream: %1</span></dt><dd><p>
-A log console output stream was given that was not recognized. The
-output stream should be one of "stdout", or "stderr"
-</p></dd><dt><a name="MSG_DUPLNS"></a><span class="term">MSG_DUPLNS line %1: duplicate $NAMESPACE directive found</span></dt><dd><p>
-When reading a message file, more than one $NAMESPACE directive was found. In
-this version of the code, such a condition is regarded as an error and the
-read will be abandoned.
-</p></dd><dt><a name="MSG_DUPMSGID"></a><span class="term">MSG_DUPMSGID duplicate message ID (%1) in compiled code</span></dt><dd><p>
-Indicative of a programming error, when it started up, BIND10 detected that
-the given message ID had been registered by one or more modules. (All message
-IDs should be unique throughout BIND10.) This has no impact on the operation
-of the server other that erroneous messages may be logged. (When BIND10 loads
-the message IDs (and their associated text), if a duplicate ID is found it is
-discarded. However, when the module that supplied the duplicate ID logs that
-particular message, the text supplied by the module that added the original
-ID will be output - something that may bear no relation to the condition being
-logged.
-</p></dd><dt><a name="MSG_IDNOTFND"></a><span class="term">MSG_IDNOTFND could not replace message text for '%1': no such message</span></dt><dd><p>
+should be one of "DEBUG", "INFO", "WARN", "ERROR", "FATAL" or "NONE".
+</p></dd><dt><a name="LOG_BAD_STREAM"></a><span class="term">LOG_BAD_STREAM bad log console output stream: %1</span></dt><dd><p>
+Logging has been configured so that output is written to the terminal
+(console) but the stream on which it is to be written is not recognised.
+Allowed values are "stdout" and "stderr".
+</p></dd><dt><a name="LOG_DUPLICATE_MESSAGE_ID"></a><span class="term">LOG_DUPLICATE_MESSAGE_ID duplicate message ID (%1) in compiled code</span></dt><dd><p>
+During start-up, BIND 10 detected that the given message identification
+had been defined multiple times in the BIND 10 code. This indicates a
+programming error; please submit a bug report.
+</p></dd><dt><a name="LOG_DUPLICATE_NAMESPACE"></a><span class="term">LOG_DUPLICATE_NAMESPACE line %1: duplicate $NAMESPACE directive found</span></dt><dd><p>
+When reading a message file, more than one $NAMESPACE directive was found.
+(This directive is used to set a C++ namespace when generating header
+files during software development.) Such a condition is regarded as an
+error and the read will be abandoned.
+</p></dd><dt><a name="LOG_INPUT_OPEN_FAIL"></a><span class="term">LOG_INPUT_OPEN_FAIL unable to open message file %1 for input: %2</span></dt><dd><p>
+The program was not able to open the specified input message file for
+the reason given.
+</p></dd><dt><a name="LOG_INVALID_MESSAGE_ID"></a><span class="term">LOG_INVALID_MESSAGE_ID line %1: invalid message identification '%2'</span></dt><dd><p>
+An invalid message identification (ID) has been found during the read of
+a message file. Message IDs should comprise only alphanumeric characters
+and the underscore, and should not start with a digit.
+</p></dd><dt><a name="LOG_NAMESPACE_EXTRA_ARGS"></a><span class="term">LOG_NAMESPACE_EXTRA_ARGS line %1: $NAMESPACE directive has too many arguments</span></dt><dd><p>
+The $NAMESPACE directive in a message file takes a single argument, a
+namespace in which all the generated symbol names are placed. This error
+is generated when the compiler finds a $NAMESPACE directive with more
+than one argument.
+</p></dd><dt><a name="LOG_NAMESPACE_INVALID_ARG"></a><span class="term">LOG_NAMESPACE_INVALID_ARG line %1: $NAMESPACE directive has an invalid argument ('%2')</span></dt><dd><p>
+The $NAMESPACE argument in a message file should be a valid C++ namespace.
+This message is output if the simple check on the syntax of the string
+carried out by the reader fails.
+</p></dd><dt><a name="LOG_NAMESPACE_NO_ARGS"></a><span class="term">LOG_NAMESPACE_NO_ARGS line %1: no arguments were given to the $NAMESPACE directive</span></dt><dd><p>
+The $NAMESPACE directive in a message file takes a single argument,
+a C++ namespace in which all the generated symbol names are placed.
+This error is generated when the compiler finds a $NAMESPACE directive
+with no arguments.
+</p></dd><dt><a name="LOG_NO_MESSAGE_ID"></a><span class="term">LOG_NO_MESSAGE_ID line %1: message definition line found without a message ID</span></dt><dd><p>
+Within a message file, messages are defined by lines starting with a "%".
+The rest of the line should comprise the message ID and text describing
+the message. This error indicates the message compiler found a line in
+the message file comprising just the "%" and nothing else.
+</p></dd><dt><a name="LOG_NO_MESSAGE_TEXT"></a><span class="term">LOG_NO_MESSAGE_TEXT line %1: line found containing a message ID ('%2') and no text</span></dt><dd><p>
+Within a message file, messages are defined by lines starting with a "%".
+The rest of the line should comprise the message ID and text describing
+the message. This error indicates the message compiler found a line
+in the message file comprising just the "%" and message identification,
+but no text.
+</p></dd><dt><a name="LOG_NO_SUCH_MESSAGE"></a><span class="term">LOG_NO_SUCH_MESSAGE could not replace message text for '%1': no such message</span></dt><dd><p>
During start-up a local message file was read. A line with the listed
-message identification was found in the file, but the identification is not
-one contained in the compiled-in message dictionary. Either the message
-identification has been mis-spelled in the file, or the local file was used
-for an earlier version of the software and the message with that
-identification has been removed.
+message identification was found in the file, but the identification is
+not one contained in the compiled-in message dictionary. This message
+may appear a number of times in the file, once for every such unknown
+message identification.
+</p><p>
+There may be several reasons why this message may appear:
+</p><p>
+- The message ID has been mis-spelled in the local message file.
+</p><p>
+- The program outputting the message may not use that particular message
+(e.g. it originates in a module not used by the program.)
+</p><p>
+- The local file was written for an earlier version of the BIND 10 software
+and the later version no longer generates that message.
+</p><p>
+Whatever the reason, there is no impact on the operation of BIND 10.
+</p></dd><dt><a name="LOG_OPEN_OUTPUT_FAIL"></a><span class="term">LOG_OPEN_OUTPUT_FAIL unable to open %1 for output: %2</span></dt><dd><p>
+Originating within the logging code, the program was not able to open
+the specified output file for the reason given.
+</p></dd><dt><a name="LOG_PREFIX_EXTRA_ARGS"></a><span class="term">LOG_PREFIX_EXTRA_ARGS line %1: $PREFIX directive has too many arguments</span></dt><dd><p>
+Within a message file, the $PREFIX directive takes a single argument,
+a prefix to be added to the symbol names when a C++ file is created.
+This error is generated when the compiler finds a $PREFIX directive with
+more than one argument.
+</p><p>
+Note: the $PREFIX directive is deprecated and will be removed in a future
+version of BIND 10.
+</p></dd><dt><a name="LOG_PREFIX_INVALID_ARG"></a><span class="term">LOG_PREFIX_INVALID_ARG line %1: $PREFIX directive has an invalid argument ('%2')</span></dt><dd><p>
+Within a message file, the $PREFIX directive takes a single argument,
+a prefix to be added to the symbol names when a C++ file is created.
+As such, it must adhere to restrictions on C++ symbol names (e.g. may
+only contain alphanumeric characters or underscores, and may not start
+with a digit). A $PREFIX directive was found with an argument (given
+in the message) that violates those restrictions.
</p><p>
-This message may appear a number of times in the file, once for every such
-unknown message identification.
-</p></dd><dt><a name="MSG_INVMSGID"></a><span class="term">MSG_INVMSGID line %1: invalid message identification '%2'</span></dt><dd><p>
-The concatenation of the prefix and the message identification is used as
-a symbol in the C++ module; as such it may only contain
-</p></dd><dt><a name="MSG_NOMSGID"></a><span class="term">MSG_NOMSGID line %1: message definition line found without a message ID</span></dt><dd><p>
-Message definition lines are lines starting with a "%". The rest of the line
-should comprise the message ID and text describing the message. This error
-indicates the message compiler found a line in the message file comprising
-just the "%" and nothing else.
-</p></dd><dt><a name="MSG_NOMSGTXT"></a><span class="term">MSG_NOMSGTXT line %1: line found containing a message ID ('%2') and no text</span></dt><dd><p>
-Message definition lines are lines starting with a "%". The rest of the line
-should comprise the message ID and text describing the message. This error
-is generated when a line is found in the message file that contains the
-leading "%" and the message identification but no text.
-</p></dd><dt><a name="MSG_NSEXTRARG"></a><span class="term">MSG_NSEXTRARG line %1: $NAMESPACE directive has too many arguments</span></dt><dd><p>
-The $NAMESPACE directive takes a single argument, a namespace in which all the
-generated symbol names are placed. This error is generated when the
-compiler finds a $NAMESPACE directive with more than one argument.
-</p></dd><dt><a name="MSG_NSINVARG"></a><span class="term">MSG_NSINVARG line %1: $NAMESPACE directive has an invalid argument ('%2')</span></dt><dd><p>
-The $NAMESPACE argument should be a valid C++ namespace. The reader does a
-cursory check on its validity, checking that the characters in the namespace
-are correct. The error is generated when the reader finds an invalid
-character. (Valid are alphanumeric characters, underscores and colons.)
-</p></dd><dt><a name="MSG_NSNOARG"></a><span class="term">MSG_NSNOARG line %1: no arguments were given to the $NAMESPACE directive</span></dt><dd><p>
-The $NAMESPACE directive takes a single argument, a namespace in which all the
-generated symbol names are placed. This error is generated when the
-compiler finds a $NAMESPACE directive with no arguments.
-</p></dd><dt><a name="MSG_OPENIN"></a><span class="term">MSG_OPENIN unable to open message file %1 for input: %2</span></dt><dd><p>
-The program was not able to open the specified input message file for the
-reason given.
-</p></dd><dt><a name="MSG_OPENOUT"></a><span class="term">MSG_OPENOUT unable to open %1 for output: %2</span></dt><dd><p>
-The program was not able to open the specified output file for the reason
-given.
-</p></dd><dt><a name="MSG_PRFEXTRARG"></a><span class="term">MSG_PRFEXTRARG line %1: $PREFIX directive has too many arguments</span></dt><dd><p>
-The $PREFIX directive takes a single argument, a prefix to be added to the
-symbol names when a C++ .h file is created. This error is generated when the
-compiler finds a $PREFIX directive with more than one argument.
-</p></dd><dt><a name="MSG_PRFINVARG"></a><span class="term">MSG_PRFINVARG line %1: $PREFIX directive has an invalid argument ('%2')</span></dt><dd><p>
-The $PREFIX argument is used in a symbol name in a C++ header file. As such,
-it must adhere to restrictions on C++ symbol names (e.g. may only contain
-alphanumeric characters or underscores, and may nor start with a digit).
-A $PREFIX directive was found with an argument (given in the message) that
-violates those restictions.
-</p></dd><dt><a name="MSG_RDLOCMES"></a><span class="term">MSG_RDLOCMES reading local message file %1</span></dt><dd><p>
-This is an informational message output by BIND10 when it starts to read a
-local message file. (A local message file may replace the text of one of more
-messages; the ID of the message will not be changed though.)
-</p></dd><dt><a name="MSG_READERR"></a><span class="term">MSG_READERR error reading from message file %1: %2</span></dt><dd><p>
+Note: the $PREFIX directive is deprecated and will be removed in a future
+version of BIND 10.
+</p></dd><dt><a name="LOG_READING_LOCAL_FILE"></a><span class="term">LOG_READING_LOCAL_FILE reading local message file %1</span></dt><dd><p>
+This is an informational message output by BIND 10 when it starts to read
+a local message file. (A local message file may replace the text of
+one or more messages; the ID of the message will not be changed though.)
+</p></dd><dt><a name="LOG_READ_ERROR"></a><span class="term">LOG_READ_ERROR error reading from message file %1: %2</span></dt><dd><p>
The specified error was encountered reading from the named message file.
-</p></dd><dt><a name="MSG_UNRECDIR"></a><span class="term">MSG_UNRECDIR line %1: unrecognised directive '%2'</span></dt><dd><p>
-A line starting with a dollar symbol was found, but the first word on the line
-(shown in the message) was not a recognised message compiler directive.
-</p></dd><dt><a name="MSG_WRITERR"></a><span class="term">MSG_WRITERR error writing to %1: %2</span></dt><dd><p>
-The specified error was encountered by the message compiler when writing to
-the named output file.
-</p></dd><dt><a name="NSAS_INVRESPSTR"></a><span class="term">NSAS_INVRESPSTR queried for %1 but got invalid response</span></dt><dd><p>
-This message indicates an internal error in the nameserver address store
-component (NSAS) of the resolver. The NSAS made a query for a RR for the
-specified nameserver but received an invalid response. Either the success
-function was called without a DNS message or the message was invalid on some
-way. (In the latter case, the error should have been picked up elsewhere in
-the processing logic, hence the raising of the error here.)
-</p></dd><dt><a name="NSAS_INVRESPTC"></a><span class="term">NSAS_INVRESPTC queried for %1 RR of type/class %2/%3, received response %4/%5</span></dt><dd><p>
-This message indicates an internal error in the nameserver address store
-component (NSAS) of the resolver. The NSAS made a query for the given RR
-type and class, but instead received an answer with the given type and class.
-</p></dd><dt><a name="NSAS_LOOKUPCANCEL"></a><span class="term">NSAS_LOOKUPCANCEL lookup for zone %1 has been cancelled</span></dt><dd><p>
-A debug message, this is output when a NSAS (nameserver address store -
-part of the resolver) lookup for a zone has been cancelled.
-</p></dd><dt><a name="NSAS_LOOKUPZONE"></a><span class="term">NSAS_LOOKUPZONE searching NSAS for nameservers for zone %1</span></dt><dd><p>
-A debug message, this is output when a call is made to the nameserver address
-store (part of the resolver) to obtain the nameservers for the specified zone.
-</p></dd><dt><a name="NSAS_NSADDR"></a><span class="term">NSAS_NSADDR asking resolver to obtain A and AAAA records for %1</span></dt><dd><p>
-A debug message, the NSAS (nameserver address store - part of the resolver) is
-making a callback into the resolver to retrieve the address records for the
-specified nameserver.
-</p></dd><dt><a name="NSAS_NSLKUPFAIL"></a><span class="term">NSAS_NSLKUPFAIL failed to lookup any %1 for %2</span></dt><dd><p>
-A debug message, the NSAS (nameserver address store - part of the resolver)
-has been unable to retrieve the specified resource record for the specified
-nameserver. This is not necessarily a problem - the nameserver may be
-unreachable, in which case the NSAS will try other nameservers in the zone.
-</p></dd><dt><a name="NSAS_NSLKUPSUCC"></a><span class="term">NSAS_NSLKUPSUCC found address %1 for %2</span></dt><dd><p>
-A debug message, the NSAS (nameserver address store - part of the resolver)
-has retrieved the given address for the specified nameserver through an
-external query.
-</p></dd><dt><a name="NSAS_SETRTT"></a><span class="term">NSAS_SETRTT reporting RTT for %1 as %2; new value is now %3</span></dt><dd><p>
+</p></dd><dt><a name="LOG_UNRECOGNISED_DIRECTIVE"></a><span class="term">LOG_UNRECOGNISED_DIRECTIVE line %1: unrecognised directive '%2'</span></dt><dd><p>
+Within a message file, a line starting with a dollar symbol was found
+(indicating the presence of a directive) but the first word on the line
+(shown in the message) was not recognised.
+</p></dd><dt><a name="LOG_WRITE_ERROR"></a><span class="term">LOG_WRITE_ERROR error writing to %1: %2</span></dt><dd><p>
+The specified error was encountered by the message compiler when writing
+to the named output file.
+</p></dd><dt><a name="NOTIFY_OUT_INVALID_ADDRESS"></a><span class="term">NOTIFY_OUT_INVALID_ADDRESS invalid address %1#%2: %3</span></dt><dd><p>
+The notify_out library tried to send a notify message to the given
+address, but it appears to be an invalid address. The configuration
+for secondary nameservers might contain a typographic error, or a
+different BIND 10 module has forgotten to validate its data before
+sending this module a notify command. As such, this should normally
+not happen, and points to an oversight in a different module.
+</p></dd><dt><a name="NOTIFY_OUT_REPLY_BAD_OPCODE"></a><span class="term">NOTIFY_OUT_REPLY_BAD_OPCODE bad opcode in notify reply from %1#%2: %3</span></dt><dd><p>
+The notify_out library sent a notify message to the nameserver at
+the given address, but the response did not have the opcode set to
+NOTIFY. The opcode in the response is printed. Since there was a
+response, no more notifies will be sent to this server for this
+notification event.
+</p></dd><dt><a name="NOTIFY_OUT_REPLY_BAD_QID"></a><span class="term">NOTIFY_OUT_REPLY_BAD_QID bad QID in notify reply from %1#%2: got %3, should be %4</span></dt><dd><p>
+The notify_out library sent a notify message to the nameserver at
+the given address, but the query id in the response does not match
+the one we sent. Since there was a response, no more notifies will
+be sent to this server for this notification event.
+</p></dd><dt><a name="NOTIFY_OUT_REPLY_BAD_QUERY_NAME"></a><span class="term">NOTIFY_OUT_REPLY_BAD_QUERY_NAME bad query name in notify reply from %1#%2: got %3, should be %4</span></dt><dd><p>
+The notify_out library sent a notify message to the nameserver at
+the given address, but the query name in the response does not match
+the one we sent. Since there was a response, no more notifies will
+be sent to this server for this notification event.
+</p></dd><dt><a name="NOTIFY_OUT_REPLY_QR_NOT_SET"></a><span class="term">NOTIFY_OUT_REPLY_QR_NOT_SET QR flags set to 0 in reply to notify from %1#%2</span></dt><dd><p>
+The notify_out library sent a notify message to the nameserver at the
+given address, but the reply did not have the QR bit set to one.
+Since there was a response, no more notifies will be sent to this
+server for this notification event.
+</p></dd><dt><a name="NOTIFY_OUT_REPLY_UNCAUGHT_EXCEPTION"></a><span class="term">NOTIFY_OUT_REPLY_UNCAUGHT_EXCEPTION uncaught exception: %1</span></dt><dd><p>
+There was an uncaught exception in the handling of a notify reply
+message, either in the message parser, or while trying to extract data
+from the parsed message. The error is printed, and notify_out will
+treat the response as a bad message, but this does point to a
+programming error, since all exceptions should have been caught
+explicitly. Please file a bug report. Since there was a response,
+no more notifies will be sent to this server for this notification
+event.
+</p></dd><dt><a name="NOTIFY_OUT_RETRY_EXCEEDED"></a><span class="term">NOTIFY_OUT_RETRY_EXCEEDED notify to %1#%2: number of retries (%3) exceeded</span></dt><dd><p>
+The maximum number of retries for the notify target has been exceeded.
+Either the address of the secondary nameserver is wrong, or it is not
+responding.
+</p></dd><dt><a name="NOTIFY_OUT_SENDING_NOTIFY"></a><span class="term">NOTIFY_OUT_SENDING_NOTIFY sending notify to %1#%2</span></dt><dd><p>
+A notify message is sent to the secondary nameserver at the given
+address.
+</p></dd><dt><a name="NOTIFY_OUT_SOCKET_ERROR"></a><span class="term">NOTIFY_OUT_SOCKET_ERROR socket error sending notify to %1#%2: %3</span></dt><dd><p>
+There was a network error while trying to send a notify message to
+the given address. The address might be unreachable. The socket
+error is printed and should provide more information.
+</p></dd><dt><a name="NOTIFY_OUT_SOCKET_RECV_ERROR"></a><span class="term">NOTIFY_OUT_SOCKET_RECV_ERROR socket error reading notify reply from %1#%2: %3</span></dt><dd><p>
+There was a network error while trying to read a notify reply
+message from the given address. The socket error is printed and should
+provide more information.
+</p></dd><dt><a name="NOTIFY_OUT_TIMEOUT"></a><span class="term">NOTIFY_OUT_TIMEOUT retry notify to %1#%2</span></dt><dd><p>
+The notify message to the given address (noted as address#port) has
+timed out, and the message will be resent until the max retry limit
+is reached.
+</p></dd><dt><a name="NSAS_FIND_NS_ADDRESS"></a><span class="term">NSAS_FIND_NS_ADDRESS asking resolver to obtain A and AAAA records for %1</span></dt><dd><p>
+A debug message issued when the NSAS (nameserver address store - part
+of the resolver) is making a callback into the resolver to retrieve the
+address records for the specified nameserver.
+</p></dd><dt><a name="NSAS_FOUND_ADDRESS"></a><span class="term">NSAS_FOUND_ADDRESS found address %1 for %2</span></dt><dd><p>
+A debug message issued when the NSAS (nameserver address store - part
+of the resolver) has retrieved the given address for the specified
+nameserver through an external query.
+</p></dd><dt><a name="NSAS_INVALID_RESPONSE"></a><span class="term">NSAS_INVALID_RESPONSE queried for %1 but got invalid response</span></dt><dd><p>
+The NSAS (nameserver address store - part of the resolver) made a query
+for a RR for the specified nameserver but received an invalid response.
+Either the success function was called without a DNS message or the
+message was invalid in some way. (In the latter case, the error should
+have been picked up elsewhere in the processing logic, hence the raising
+of the error here.)
+</p><p>
+This message indicates an internal error in the NSAS. Please raise a
+bug report.
+</p></dd><dt><a name="NSAS_LOOKUP_CANCEL"></a><span class="term">NSAS_LOOKUP_CANCEL lookup for zone %1 has been canceled</span></dt><dd><p>
+A debug message issued when an NSAS (nameserver address store - part of
+the resolver) lookup for a zone has been canceled.
+</p></dd><dt><a name="NSAS_NS_LOOKUP_FAIL"></a><span class="term">NSAS_NS_LOOKUP_FAIL failed to lookup any %1 for %2</span></dt><dd><p>
+A debug message issued when the NSAS (nameserver address store - part of
+the resolver) has been unable to retrieve the specified resource record
+for the specified nameserver. This is not necessarily a problem - the
+nameserver may be unreachable, in which case the NSAS will try other
+nameservers in the zone.
+</p></dd><dt><a name="NSAS_SEARCH_ZONE_NS"></a><span class="term">NSAS_SEARCH_ZONE_NS searching NSAS for nameservers for zone %1</span></dt><dd><p>
+A debug message output when a call is made to the NSAS (nameserver
+address store - part of the resolver) to obtain the nameservers for
+the specified zone.
+</p></dd><dt><a name="NSAS_UPDATE_RTT"></a><span class="term">NSAS_UPDATE_RTT update RTT for %1: was %2 ms, is now %3 ms</span></dt><dd><p>
A NSAS (nameserver address store - part of the resolver) debug message
-reporting the round-trip time (RTT) for a query made to the specified
-nameserver. The RTT has been updated using the value given and the new RTT is
-displayed. (The RTT is subject to a calculation that damps out sudden
-changes. As a result, the new RTT is not necessarily equal to the RTT
-reported.)
+reporting the update of a round-trip time (RTT) for a query made to the
+specified nameserver. The RTT has been updated using the value given
+and the new RTT is displayed. (The RTT is subject to a calculation that
+damps out sudden changes. As a result, the new RTT used by the NSAS in
+future decisions of which nameserver to use is not necessarily equal to
+the RTT reported.)
+</p></dd><dt><a name="NSAS_WRONG_ANSWER"></a><span class="term">NSAS_WRONG_ANSWER queried for %1 RR of type/class %2/%3, received response %4/%5</span></dt><dd><p>
+A NSAS (nameserver address store - part of the resolver) made a query for
+a resource record of a particular type and class, but instead received
+an answer with a different given type and class.
+</p><p>
+This message indicates an internal error in the NSAS. Please raise a
+bug report.
</p></dd><dt><a name="RESLIB_ANSWER"></a><span class="term">RESLIB_ANSWER answer received in response to query for <%1></span></dt><dd><p>
A debug message recording that an answer has been received to an upstream
query for the specified question. Previous debug messages will have indicated
@@ -599,95 +1372,95 @@ the server to which the question was sent.
</p></dd><dt><a name="RESLIB_DEEPEST"></a><span class="term">RESLIB_DEEPEST did not find <%1> in cache, deepest delegation found is %2</span></dt><dd><p>
A debug message, a cache lookup did not find the specified <name, class,
type> tuple in the cache; instead, the deepest delegation found is indicated.
-</p></dd><dt><a name="RESLIB_FOLLOWCNAME"></a><span class="term">RESLIB_FOLLOWCNAME following CNAME chain to <%1></span></dt><dd><p>
+</p></dd><dt><a name="RESLIB_FOLLOW_CNAME"></a><span class="term">RESLIB_FOLLOW_CNAME following CNAME chain to <%1></span></dt><dd><p>
A debug message, a CNAME response was received and another query is being issued
for the <name, class, type> tuple.
-</p></dd><dt><a name="RESLIB_LONGCHAIN"></a><span class="term">RESLIB_LONGCHAIN CNAME received in response to query for <%1>: CNAME chain length exceeded</span></dt><dd><p>
+</p></dd><dt><a name="RESLIB_LONG_CHAIN"></a><span class="term">RESLIB_LONG_CHAIN CNAME received in response to query for <%1>: CNAME chain length exceeded</span></dt><dd><p>
A debug message recording that a CNAME response has been received to an upstream
query for the specified question (Previous debug messages will have indicated
the server to which the question was sent). However, receipt of this CNAME
has meant that the resolver has exceeded the CNAME chain limit (a CNAME chain
is where one CNAME points to another) and so an error is being returned.
-</p></dd><dt><a name="RESLIB_NONSRRSET"></a><span class="term">RESLIB_NONSRRSET no NS RRSet in referral response received to query for <%1></span></dt><dd><p>
+</p></dd><dt><a name="RESLIB_NO_NS_RRSET"></a><span class="term">RESLIB_NO_NS_RRSET no NS RRSet in referral response received to query for <%1></span></dt><dd><p>
A debug message, this indicates that a response was received for the specified
-query and was categorised as a referral. However, the received message did
+query and was categorized as a referral. However, the received message did
not contain any NS RRsets. This may indicate a programming error in the
response classification code.
-</p></dd><dt><a name="RESLIB_NSASLOOK"></a><span class="term">RESLIB_NSASLOOK looking up nameserver for zone %1 in the NSAS</span></dt><dd><p>
+</p></dd><dt><a name="RESLIB_NSAS_LOOKUP"></a><span class="term">RESLIB_NSAS_LOOKUP looking up nameserver for zone %1 in the NSAS</span></dt><dd><p>
A debug message, the RunningQuery object is querying the NSAS for the
nameservers for the specified zone.
-</p></dd><dt><a name="RESLIB_NXDOMRR"></a><span class="term">RESLIB_NXDOMRR NXDOMAIN/NXRRSET received in response to query for <%1></span></dt><dd><p>
+</p></dd><dt><a name="RESLIB_NXDOM_NXRR"></a><span class="term">RESLIB_NXDOM_NXRR NXDOMAIN/NXRRSET received in response to query for <%1></span></dt><dd><p>
A debug message recording that either a NXDOMAIN or an NXRRSET response has
been received to an upstream query for the specified question. Previous debug
messages will have indicated the server to which the question was sent.
</p></dd><dt><a name="RESLIB_PROTOCOL"></a><span class="term">RESLIB_PROTOCOL protocol error in answer for %1: %3</span></dt><dd><p>
A debug message indicating that a protocol error was received. As there
are no retries left, an error will be reported.
-</p></dd><dt><a name="RESLIB_PROTOCOLRTRY"></a><span class="term">RESLIB_PROTOCOLRTRY protocol error in answer for %1: %2 (retries left: %3)</span></dt><dd><p>
+</p></dd><dt><a name="RESLIB_PROTOCOL_RETRY"></a><span class="term">RESLIB_PROTOCOL_RETRY protocol error in answer for %1: %2 (retries left: %3)</span></dt><dd><p>
A debug message indicating that a protocol error was received and that
the resolver is repeating the query to the same nameserver. After this
repeated query, there will be the indicated number of retries left.
-</p></dd><dt><a name="RESLIB_RCODERR"></a><span class="term">RESLIB_RCODERR RCODE indicates error in response to query for <%1></span></dt><dd><p>
+</p></dd><dt><a name="RESLIB_RCODE_ERR"></a><span class="term">RESLIB_RCODE_ERR RCODE indicates error in response to query for <%1></span></dt><dd><p>
A debug message, the response to the specified query indicated an error
that is not covered by a specific code path. A SERVFAIL will be returned.
-</p></dd><dt><a name="RESLIB_REFERRAL"></a><span class="term">RESLIB_REFERRAL referral received in response to query for <%1></span></dt><dd><p>
-A debug message recording that a referral response has been received to an
-upstream query for the specified question. Previous debug messages will
-have indicated the server to which the question was sent.
-</p></dd><dt><a name="RESLIB_REFERZONE"></a><span class="term">RESLIB_REFERZONE referred to zone %1</span></dt><dd><p>
-A debug message indicating that the last referral message was to the specified
-zone.
-</p></dd><dt><a name="RESLIB_RESCAFND"></a><span class="term">RESLIB_RESCAFND found <%1> in the cache (resolve() instance %2)</span></dt><dd><p>
+</p></dd><dt><a name="RESLIB_RECQ_CACHE_FIND"></a><span class="term">RESLIB_RECQ_CACHE_FIND found <%1> in the cache (resolve() instance %2)</span></dt><dd><p>
This is a debug message and indicates that a RecursiveQuery object found
the specified <name, class, type> tuple in the cache. The instance number
at the end of the message indicates which of the two resolve() methods has
been called.
-</p></dd><dt><a name="RESLIB_RESCANOTFND"></a><span class="term">RESLIB_RESCANOTFND did not find <%1> in the cache, starting RunningQuery (resolve() instance %2)</span></dt><dd><p>
+</p></dd><dt><a name="RESLIB_RECQ_CACHE_NO_FIND"></a><span class="term">RESLIB_RECQ_CACHE_NO_FIND did not find <%1> in the cache, starting RunningQuery (resolve() instance %2)</span></dt><dd><p>
This is a debug message and indicates that the look in the cache made by the
RecursiveQuery::resolve() method did not find an answer, so a new RunningQuery
object has been created to resolve the question. The instance number at
the end of the message indicates which of the two resolve() methods has
been called.
+</p></dd><dt><a name="RESLIB_REFERRAL"></a><span class="term">RESLIB_REFERRAL referral received in response to query for <%1></span></dt><dd><p>
+A debug message recording that a referral response has been received to an
+upstream query for the specified question. Previous debug messages will
+have indicated the server to which the question was sent.
+</p></dd><dt><a name="RESLIB_REFER_ZONE"></a><span class="term">RESLIB_REFER_ZONE referred to zone %1</span></dt><dd><p>
+A debug message indicating that the last referral message was to the specified
+zone.
</p></dd><dt><a name="RESLIB_RESOLVE"></a><span class="term">RESLIB_RESOLVE asked to resolve <%1> (resolve() instance %2)</span></dt><dd><p>
A debug message, the RecursiveQuery::resolve method has been called to resolve
the specified <name, class, type> tuple. The first action will be to lookup
the specified tuple in the cache. The instance number at the end of the
message indicates which of the two resolve() methods has been called.
-</p></dd><dt><a name="RESLIB_RRSETFND"></a><span class="term">RESLIB_RRSETFND found single RRset in the cache when querying for <%1> (resolve() instance %2)</span></dt><dd><p>
+</p></dd><dt><a name="RESLIB_RRSET_FOUND"></a><span class="term">RESLIB_RRSET_FOUND found single RRset in the cache when querying for <%1> (resolve() instance %2)</span></dt><dd><p>
A debug message, indicating that when RecursiveQuery::resolve queried the
cache, a single RRset was found which was put in the answer. The instance
number at the end of the message indicates which of the two resolve()
methods has been called.
</p></dd><dt><a name="RESLIB_RTT"></a><span class="term">RESLIB_RTT round-trip time of last query calculated as %1 ms</span></dt><dd><p>
A debug message giving the round-trip time of the last query and response.
-</p></dd><dt><a name="RESLIB_RUNCAFND"></a><span class="term">RESLIB_RUNCAFND found <%1> in the cache</span></dt><dd><p>
+</p></dd><dt><a name="RESLIB_RUNQ_CACHE_FIND"></a><span class="term">RESLIB_RUNQ_CACHE_FIND found <%1> in the cache</span></dt><dd><p>
This is a debug message and indicates that a RunningQuery object found
the specified <name, class, type> tuple in the cache.
-</p></dd><dt><a name="RESLIB_RUNCALOOK"></a><span class="term">RESLIB_RUNCALOOK looking up up <%1> in the cache</span></dt><dd><p>
+</p></dd><dt><a name="RESLIB_RUNQ_CACHE_LOOKUP"></a><span class="term">RESLIB_RUNQ_CACHE_LOOKUP looking up up <%1> in the cache</span></dt><dd><p>
This is a debug message and indicates that a RunningQuery object has made
a call to its doLookup() method to look up the specified <name, class, type>
tuple, the first action of which will be to examine the cache.
-</p></dd><dt><a name="RESLIB_RUNQUFAIL"></a><span class="term">RESLIB_RUNQUFAIL failure callback - nameservers are unreachable</span></dt><dd><p>
+</p></dd><dt><a name="RESLIB_RUNQ_FAIL"></a><span class="term">RESLIB_RUNQ_FAIL failure callback - nameservers are unreachable</span></dt><dd><p>
A debug message indicating that a RunningQuery's failure callback has been
called because all nameservers for the zone in question are unreachable.
-</p></dd><dt><a name="RESLIB_RUNQUSUCC"></a><span class="term">RESLIB_RUNQUSUCC success callback - sending query to %1</span></dt><dd><p>
+</p></dd><dt><a name="RESLIB_RUNQ_SUCCESS"></a><span class="term">RESLIB_RUNQ_SUCCESS success callback - sending query to %1</span></dt><dd><p>
A debug message indicating that a RunningQuery's success callback has been
called because a nameserver has been found, and that a query is being sent
to the specified nameserver.
-</p></dd><dt><a name="RESLIB_TESTSERV"></a><span class="term">RESLIB_TESTSERV setting test server to %1(%2)</span></dt><dd><p>
-This is an internal debugging message and is only generated in unit tests.
-It indicates that all upstream queries from the resolver are being routed to
-the specified server, regardless of the address of the nameserver to which
-the query would normally be routed. As it should never be seen in normal
-operation, it is a warning message instead of a debug message.
-</p></dd><dt><a name="RESLIB_TESTUPSTR"></a><span class="term">RESLIB_TESTUPSTR sending upstream query for <%1> to test server at %2</span></dt><dd><p>
+</p></dd><dt><a name="RESLIB_TEST_SERVER"></a><span class="term">RESLIB_TEST_SERVER setting test server to %1(%2)</span></dt><dd><p>
+This is a warning message only generated in unit tests. It indicates
+that all upstream queries from the resolver are being routed to the
+specified server, regardless of the address of the nameserver to which
+the query would normally be routed. If seen during normal operation,
+please submit a bug report.
+</p></dd><dt><a name="RESLIB_TEST_UPSTREAM"></a><span class="term">RESLIB_TEST_UPSTREAM sending upstream query for <%1> to test server at %2</span></dt><dd><p>
This is a debug message and should only be seen in unit tests. A query for
the specified <name, class, type> tuple is being sent to a test nameserver
whose address is given in the message.
</p></dd><dt><a name="RESLIB_TIMEOUT"></a><span class="term">RESLIB_TIMEOUT query <%1> to %2 timed out</span></dt><dd><p>
-A debug message indicating that the specified query has timed out and as
-there are no retries left, an error will be reported.
-</p></dd><dt><a name="RESLIB_TIMEOUTRTRY"></a><span class="term">RESLIB_TIMEOUTRTRY query <%1> to %2 timed out, re-trying (retries left: %3)</span></dt><dd><p>
+A debug message indicating that the specified upstream query has timed out and
+there are no retries left.
+</p></dd><dt><a name="RESLIB_TIMEOUT_RETRY"></a><span class="term">RESLIB_TIMEOUT_RETRY query <%1> to %2 timed out, re-trying (retries left: %3)</span></dt><dd><p>
A debug message indicating that the specified query has timed out and that
the resolver is repeating the query to the same nameserver. After this
repeated query, there will be the indicated number of retries left.
@@ -699,143 +1472,610 @@ gives no cause for concern.
</p></dd><dt><a name="RESLIB_UPSTREAM"></a><span class="term">RESLIB_UPSTREAM sending upstream query for <%1> to %2</span></dt><dd><p>
A debug message indicating that a query for the specified <name, class, type>
tuple is being sent to a nameserver whose address is given in the message.
-</p></dd><dt><a name="RESOLVER_AXFRTCP"></a><span class="term">RESOLVER_AXFRTCP AXFR request received over TCP</span></dt><dd><p>
-A debug message, the resolver received a NOTIFY message over TCP. The server
-cannot process it and will return an error message to the sender with the
-RCODE set to NOTIMP.
-</p></dd><dt><a name="RESOLVER_AXFRUDP"></a><span class="term">RESOLVER_AXFRUDP AXFR request received over UDP</span></dt><dd><p>
-A debug message, the resolver received a NOTIFY message over UDP. The server
-cannot process it (and in any case, an AXFR request should be sent over TCP)
-and will return an error message to the sender with the RCODE set to FORMERR.
-</p></dd><dt><a name="RESOLVER_CLTMOSMALL"></a><span class="term">RESOLVER_CLTMOSMALL client timeout of %1 is too small</span></dt><dd><p>
-An error indicating that the configuration value specified for the query
-timeout is too small.
-</p></dd><dt><a name="RESOLVER_CONFIGCHAN"></a><span class="term">RESOLVER_CONFIGCHAN configuration channel created</span></dt><dd><p>
-A debug message, output when the resolver has successfully established a
-connection to the configuration channel.
-</p></dd><dt><a name="RESOLVER_CONFIGERR"></a><span class="term">RESOLVER_CONFIGERR error in configuration: %1</span></dt><dd><p>
-An error was detected in a configuration update received by the resolver. This
-may be in the format of the configuration message (in which case this is a
-programming error) or it may be in the data supplied (in which case it is
-a user error). The reason for the error, given as a parameter in the message,
-will give more details.
-</p></dd><dt><a name="RESOLVER_CONFIGLOAD"></a><span class="term">RESOLVER_CONFIGLOAD configuration loaded</span></dt><dd><p>
-A debug message, output when the resolver configuration has been successfully
-loaded.
-</p></dd><dt><a name="RESOLVER_CONFIGUPD"></a><span class="term">RESOLVER_CONFIGUPD configuration updated: %1</span></dt><dd><p>
-A debug message, the configuration has been updated with the specified
-information.
+</p></dd><dt><a name="RESOLVER_AXFR_TCP"></a><span class="term">RESOLVER_AXFR_TCP AXFR request received over TCP</span></dt><dd><p>
+This is a debug message output when the resolver received a request for
+an AXFR (full transfer of a zone) over TCP. Only authoritative servers
+are able to handle AXFR requests, so the resolver will return an error
+message to the sender with the RCODE set to NOTIMP.
+</p></dd><dt><a name="RESOLVER_AXFR_UDP"></a><span class="term">RESOLVER_AXFR_UDP AXFR request received over UDP</span></dt><dd><p>
+This is a debug message output when the resolver received a request for
+an AXFR (full transfer of a zone) over UDP. Only authoritative servers
+are able to handle AXFR requests (and in any case, an AXFR request should
+be sent over TCP), so the resolver will return an error message to the
+sender with the RCODE set to NOTIMP.
+</p></dd><dt><a name="RESOLVER_CLIENT_TIME_SMALL"></a><span class="term">RESOLVER_CLIENT_TIME_SMALL client timeout of %1 is too small</span></dt><dd><p>
+During the update of the resolver's configuration parameters, the value
+of the client timeout was found to be too small. The configuration
+update was abandoned and the parameters were not changed.
+</p></dd><dt><a name="RESOLVER_CONFIG_CHANNEL"></a><span class="term">RESOLVER_CONFIG_CHANNEL configuration channel created</span></dt><dd><p>
+This is a debug message output when the resolver has successfully
+established a connection to the configuration channel.
+</p></dd><dt><a name="RESOLVER_CONFIG_ERROR"></a><span class="term">RESOLVER_CONFIG_ERROR error in configuration: %1</span></dt><dd><p>
+An error was detected in a configuration update received by the
+resolver. This may be in the format of the configuration message (in
+which case this is a programming error) or it may be in the data supplied
+(in which case it is a user error). The reason for the error, included
+in the message, will give more details. The configuration update is
+not applied and the resolver parameters were not changed.
+</p></dd><dt><a name="RESOLVER_CONFIG_LOADED"></a><span class="term">RESOLVER_CONFIG_LOADED configuration loaded</span></dt><dd><p>
+This is a debug message output when the resolver configuration has been
+successfully loaded.
+</p></dd><dt><a name="RESOLVER_CONFIG_UPDATED"></a><span class="term">RESOLVER_CONFIG_UPDATED configuration updated: %1</span></dt><dd><p>
+This is a debug message output when the resolver configuration is being
+updated with the specified information.
</p></dd><dt><a name="RESOLVER_CREATED"></a><span class="term">RESOLVER_CREATED main resolver object created</span></dt><dd><p>
-A debug message, output when the Resolver() object has been created.
-</p></dd><dt><a name="RESOLVER_DNSMSGRCVD"></a><span class="term">RESOLVER_DNSMSGRCVD DNS message received: %1</span></dt><dd><p>
-A debug message, this always precedes some other logging message and is the
-formatted contents of the DNS packet that the other message refers to.
-</p></dd><dt><a name="RESOLVER_DNSMSGSENT"></a><span class="term">RESOLVER_DNSMSGSENT DNS message of %1 bytes sent: %2</span></dt><dd><p>
-A debug message, this contains details of the response sent back to the querying
-system.
+This is a debug message indicating that the main resolver object has
+been created.
+</p></dd><dt><a name="RESOLVER_DNS_MESSAGE_RECEIVED"></a><span class="term">RESOLVER_DNS_MESSAGE_RECEIVED DNS message received: %1</span></dt><dd><p>
+This is a debug message from the resolver listing the contents of a
+received DNS message.
+</p></dd><dt><a name="RESOLVER_DNS_MESSAGE_SENT"></a><span class="term">RESOLVER_DNS_MESSAGE_SENT DNS message of %1 bytes sent: %2</span></dt><dd><p>
+This is a debug message containing details of the response returned by
+the resolver to the querying system.
</p></dd><dt><a name="RESOLVER_FAILED"></a><span class="term">RESOLVER_FAILED resolver failed, reason: %1</span></dt><dd><p>
-This is an error message output when an unhandled exception is caught by the
-resolver. All it can do is to shut down.
-</p></dd><dt><a name="RESOLVER_FWDADDR"></a><span class="term">RESOLVER_FWDADDR setting forward address %1(%2)</span></dt><dd><p>
-This message may appear multiple times during startup, and it lists the
-forward addresses used by the resolver when running in forwarding mode.
-</p></dd><dt><a name="RESOLVER_FWDQUERY"></a><span class="term">RESOLVER_FWDQUERY processing forward query</span></dt><dd><p>
-The received query has passed all checks and is being forwarded to upstream
+This is an error message output when an unhandled exception is caught
+by the resolver. After this, the resolver will shut itself down.
+Please submit a bug report.
+</p></dd><dt><a name="RESOLVER_FORWARD_ADDRESS"></a><span class="term">RESOLVER_FORWARD_ADDRESS setting forward address %1(%2)</span></dt><dd><p>
+If the resolver is running in forward mode, this message will appear
+during startup to list the forward address. If multiple addresses are
+specified, it will appear once for each address.
+</p></dd><dt><a name="RESOLVER_FORWARD_QUERY"></a><span class="term">RESOLVER_FORWARD_QUERY processing forward query</span></dt><dd><p>
+This is a debug message indicating that a query received by the resolver
+has passed a set of checks (message is well-formed, it is allowed by the
+ACL, it is a supported opcode, etc.) and is being forwarded to upstream
servers.
-</p></dd><dt><a name="RESOLVER_HDRERR"></a><span class="term">RESOLVER_HDRERR message received, exception when processing header: %1</span></dt><dd><p>
-A debug message noting that an exception occurred during the processing of
-a received packet. The packet has been dropped.
+</p></dd><dt><a name="RESOLVER_HEADER_ERROR"></a><span class="term">RESOLVER_HEADER_ERROR message received, exception when processing header: %1</span></dt><dd><p>
+This is a debug message from the resolver noting that an exception
+occurred during the processing of a received packet. The packet has
+been dropped.
</p></dd><dt><a name="RESOLVER_IXFR"></a><span class="term">RESOLVER_IXFR IXFR request received</span></dt><dd><p>
-The resolver received a NOTIFY message over TCP. The server cannot process it
-and will return an error message to the sender with the RCODE set to NOTIMP.
-</p></dd><dt><a name="RESOLVER_LKTMOSMALL"></a><span class="term">RESOLVER_LKTMOSMALL lookup timeout of %1 is too small</span></dt><dd><p>
-An error indicating that the configuration value specified for the lookup
-timeout is too small.
-</p></dd><dt><a name="RESOLVER_NFYNOTAUTH"></a><span class="term">RESOLVER_NFYNOTAUTH NOTIFY arrived but server is not authoritative</span></dt><dd><p>
-The resolver received a NOTIFY message. As the server is not authoritative it
-cannot process it, so it returns an error message to the sender with the RCODE
-set to NOTAUTH.
-</p></dd><dt><a name="RESOLVER_NORMQUERY"></a><span class="term">RESOLVER_NORMQUERY processing normal query</span></dt><dd><p>
-The received query has passed all checks and is being processed by the resolver.
-</p></dd><dt><a name="RESOLVER_NOROOTADDR"></a><span class="term">RESOLVER_NOROOTADDR no root addresses available</span></dt><dd><p>
-A warning message during startup, indicates that no root addresses have been
-set. This may be because the resolver will get them from a priming query.
-</p></dd><dt><a name="RESOLVER_NOTIN"></a><span class="term">RESOLVER_NOTIN non-IN class request received, returning REFUSED message</span></dt><dd><p>
-A debug message, the resolver has received a DNS packet that was not IN class.
-The resolver cannot handle such packets, so is returning a REFUSED response to
-the sender.
-</p></dd><dt><a name="RESOLVER_NOTONEQUES"></a><span class="term">RESOLVER_NOTONEQUES query contained %1 questions, exactly one question was expected</span></dt><dd><p>
-A debug message, the resolver received a query that contained the number of
-entires in the question section detailed in the message. This is a malformed
-message, as a DNS query must contain only one question. The resolver will
-return a message to the sender with the RCODE set to FORMERR.
-</p></dd><dt><a name="RESOLVER_OPCODEUNS"></a><span class="term">RESOLVER_OPCODEUNS opcode %1 not supported by the resolver</span></dt><dd><p>
-A debug message, the resolver received a message with an unsupported opcode
-(it can only process QUERY opcodes). It will return a message to the sender
-with the RCODE set to NOTIMP.
-</p></dd><dt><a name="RESOLVER_PARSEERR"></a><span class="term">RESOLVER_PARSEERR error parsing received message: %1 - returning %2</span></dt><dd><p>
-A debug message noting that the resolver received a message and the parsing
-of the body of the message failed due to some non-protocol related reason
-(although the parsing of the header succeeded). The message parameters give
-a textual description of the problem and the RCODE returned.
-</p></dd><dt><a name="RESOLVER_PRINTMSG"></a><span class="term">RESOLVER_PRINTMSG print message command, aeguments are: %1</span></dt><dd><p>
-This message is logged when a "print_message" command is received over the
-command channel.
-</p></dd><dt><a name="RESOLVER_PROTERR"></a><span class="term">RESOLVER_PROTERR protocol error parsing received message: %1 - returning %2</span></dt><dd><p>
-A debug message noting that the resolver received a message and the parsing
-of the body of the message failed due to some protocol error (although the
-parsing of the header succeeded). The message parameters give a textual
-description of the problem and the RCODE returned.
-</p></dd><dt><a name="RESOLVER_QUSETUP"></a><span class="term">RESOLVER_QUSETUP query setup</span></dt><dd><p>
-A debug message noting that the resolver is creating a RecursiveQuery object.
-</p></dd><dt><a name="RESOLVER_QUSHUT"></a><span class="term">RESOLVER_QUSHUT query shutdown</span></dt><dd><p>
-A debug message noting that the resolver is destroying a RecursiveQuery object.
-</p></dd><dt><a name="RESOLVER_QUTMOSMALL"></a><span class="term">RESOLVER_QUTMOSMALL query timeout of %1 is too small</span></dt><dd><p>
-An error indicating that the configuration value specified for the query
-timeout is too small.
+This is a debug message indicating that the resolver received a request
+for an IXFR (incremental transfer of a zone). Only authoritative servers
+are able to handle IXFR requests, so the resolver will return an error
+message to the sender with the RCODE set to NOTIMP.
+</p></dd><dt><a name="RESOLVER_LOOKUP_TIME_SMALL"></a><span class="term">RESOLVER_LOOKUP_TIME_SMALL lookup timeout of %1 is too small</span></dt><dd><p>
+During the update of the resolver's configuration parameters, the value
+of the lookup timeout was found to be too small. The configuration
+update will not be applied.
+</p></dd><dt><a name="RESOLVER_MESSAGE_ERROR"></a><span class="term">RESOLVER_MESSAGE_ERROR error parsing received message: %1 - returning %2</span></dt><dd><p>
+This is a debug message noting that parsing of the body of a received
+message by the resolver failed due to some error (although the parsing of
+the header succeeded). The message parameters give a textual description
+of the problem and the RCODE returned.
+</p></dd><dt><a name="RESOLVER_NEGATIVE_RETRIES"></a><span class="term">RESOLVER_NEGATIVE_RETRIES negative number of retries (%1) specified in the configuration</span></dt><dd><p>
+This error is issued when a resolver configuration update has specified
+a negative retry count: only zero or positive values are valid. The
+configuration update was abandoned and the parameters were not changed.
+</p></dd><dt><a name="RESOLVER_NON_IN_PACKET"></a><span class="term">RESOLVER_NON_IN_PACKET non-IN class request received, returning REFUSED message</span></dt><dd><p>
+This debug message is issued when the resolver has received a DNS packet that
+was not IN (Internet) class. The resolver cannot handle such packets,
+so is returning a REFUSED response to the sender.
+</p></dd><dt><a name="RESOLVER_NORMAL_QUERY"></a><span class="term">RESOLVER_NORMAL_QUERY processing normal query</span></dt><dd><p>
+This is a debug message indicating that the query received by the resolver
+has passed a set of checks (message is well-formed, it is allowed by the
+ACL, it is a supported opcode, etc.) and is being processed by the resolver.
+</p></dd><dt><a name="RESOLVER_NOTIFY_RECEIVED"></a><span class="term">RESOLVER_NOTIFY_RECEIVED NOTIFY arrived but server is not authoritative</span></dt><dd><p>
+The resolver has received a NOTIFY message. As the server is not
+authoritative it cannot process it, so it returns an error message to
+the sender with the RCODE set to NOTAUTH.
+</p></dd><dt><a name="RESOLVER_NOT_ONE_QUESTION"></a><span class="term">RESOLVER_NOT_ONE_QUESTION query contained %1 questions, exactly one question was expected</span></dt><dd><p>
+This debug message indicates that the resolver received a query that
+contained the number of entries in the question section detailed in
+the message. This is a malformed message, as a DNS query must contain
+only one question. The resolver will return a message to the sender
+with the RCODE set to FORMERR.
+</p></dd><dt><a name="RESOLVER_NO_ROOT_ADDRESS"></a><span class="term">RESOLVER_NO_ROOT_ADDRESS no root addresses available</span></dt><dd><p>
+A warning message issued during resolver startup, this indicates that
+no root addresses have been set. This may be because the resolver will
+get them from a priming query.
+</p></dd><dt><a name="RESOLVER_PARSE_ERROR"></a><span class="term">RESOLVER_PARSE_ERROR error parsing received message: %1 - returning %2</span></dt><dd><p>
+This is a debug message noting that the resolver received a message and
+the parsing of the body of the message failed due to some non-protocol
+related reason (although the parsing of the header succeeded).
+The message parameters give a textual description of the problem and
+the RCODE returned.
+</p></dd><dt><a name="RESOLVER_PRINT_COMMAND"></a><span class="term">RESOLVER_PRINT_COMMAND print message command, arguments are: %1</span></dt><dd><p>
+This debug message is logged when a "print_message" command is received
+by the resolver over the command channel.
+</p></dd><dt><a name="RESOLVER_PROTOCOL_ERROR"></a><span class="term">RESOLVER_PROTOCOL_ERROR protocol error parsing received message: %1 - returning %2</span></dt><dd><p>
+This is a debug message noting that the resolver received a message and
+the parsing of the body of the message failed due to some protocol error
+(although the parsing of the header succeeded). The message parameters
+give a textual description of the problem and the RCODE returned.
+</p></dd><dt><a name="RESOLVER_QUERY_ACCEPTED"></a><span class="term">RESOLVER_QUERY_ACCEPTED query accepted: '%1/%2/%3' from %4</span></dt><dd><p>
+This debug message is produced by the resolver when an incoming query
+is accepted in terms of the query ACL. The log message shows the query
+in the form of <query name>/<query type>/<query class>, and the client
+that sends the query in the form of <Source IP address>#<source port>.
+</p></dd><dt><a name="RESOLVER_QUERY_DROPPED"></a><span class="term">RESOLVER_QUERY_DROPPED query dropped: '%1/%2/%3' from %4</span></dt><dd><p>
+This is an informational message that indicates an incoming query has
+been dropped by the resolver because of the query ACL. Unlike the
+RESOLVER_QUERY_REJECTED case, the server does not return any response.
+The log message shows the query in the form of <query name>/<query
+type>/<query class>, and the client that sends the query in the form of
+<Source IP address>#<source port>.
+</p></dd><dt><a name="RESOLVER_QUERY_REJECTED"></a><span class="term">RESOLVER_QUERY_REJECTED query rejected: '%1/%2/%3' from %4</span></dt><dd><p>
+This is an informational message that indicates an incoming query has
+been rejected by the resolver because of the query ACL. This results
+in a response with an RCODE of REFUSED. The log message shows the query
+in the form of <query name>/<query type>/<query class>, and the client
+that sends the query in the form of <Source IP address>#<source port>.
+</p></dd><dt><a name="RESOLVER_QUERY_SETUP"></a><span class="term">RESOLVER_QUERY_SETUP query setup</span></dt><dd><p>
+This is a debug message noting that the resolver is creating a
+RecursiveQuery object.
+</p></dd><dt><a name="RESOLVER_QUERY_SHUTDOWN"></a><span class="term">RESOLVER_QUERY_SHUTDOWN query shutdown</span></dt><dd><p>
+This is a debug message noting that the resolver is destroying a
+RecursiveQuery object.
+</p></dd><dt><a name="RESOLVER_QUERY_TIME_SMALL"></a><span class="term">RESOLVER_QUERY_TIME_SMALL query timeout of %1 is too small</span></dt><dd><p>
+During the update of the resolver's configuration parameters, the value
+of the query timeout was found to be too small. The configuration
+parameters were not changed.
+</p></dd><dt><a name="RESOLVER_RECEIVED_MESSAGE"></a><span class="term">RESOLVER_RECEIVED_MESSAGE resolver has received a DNS message</span></dt><dd><p>
+This is a debug message indicating that the resolver has received a
+DNS message. Depending on the debug settings, subsequent log output
+will indicate the nature of the message.
</p></dd><dt><a name="RESOLVER_RECURSIVE"></a><span class="term">RESOLVER_RECURSIVE running in recursive mode</span></dt><dd><p>
-This is an informational message that appears at startup noting that the
-resolver is running in recursive mode.
-</p></dd><dt><a name="RESOLVER_RECVMSG"></a><span class="term">RESOLVER_RECVMSG resolver has received a DNS message</span></dt><dd><p>
-A debug message indicating that the resolver has received a message. Depending
-on the debug settings, subsequent log output will indicate the nature of the
-message.
-</p></dd><dt><a name="RESOLVER_RETRYNEG"></a><span class="term">RESOLVER_RETRYNEG negative number of retries (%1) specified in the configuration</span></dt><dd><p>
-An error message indicating that the resolver configuration has specified a
-negative retry count. Only zero or positive values are valid.
-</p></dd><dt><a name="RESOLVER_ROOTADDR"></a><span class="term">RESOLVER_ROOTADDR setting root address %1(%2)</span></dt><dd><p>
-This message may appear multiple times during startup; it lists the root
-addresses used by the resolver.
-</p></dd><dt><a name="RESOLVER_SERVICE"></a><span class="term">RESOLVER_SERVICE service object created</span></dt><dd><p>
-A debug message, output when the main service object (which handles the
-received queries) is created.
-</p></dd><dt><a name="RESOLVER_SETPARAM"></a><span class="term">RESOLVER_SETPARAM query timeout: %1, client timeout: %2, lookup timeout: %3, retry count: %4</span></dt><dd><p>
-A debug message, lists the parameters associated with the message. These are:
+This is an informational message that appears at startup noting that
+the resolver is running in recursive mode.
+</p></dd><dt><a name="RESOLVER_SERVICE_CREATED"></a><span class="term">RESOLVER_SERVICE_CREATED service object created</span></dt><dd><p>
+This debug message is output when resolver creates the main service object
+(which handles the received queries).
+</p></dd><dt><a name="RESOLVER_SET_PARAMS"></a><span class="term">RESOLVER_SET_PARAMS query timeout: %1, client timeout: %2, lookup timeout: %3, retry count: %4</span></dt><dd><p>
+This debug message lists the parameters being set for the resolver. These are:
query timeout: the timeout (in ms) used for queries originated by the resolver
-to upstream servers. Client timeout: the interval to resolver a query by
+to upstream servers. Client timeout: the interval to resolve a query by
a client: after this time, the resolver sends back a SERVFAIL to the client
-whilst continuing to resolver the query. Lookup timeout: the time at which the
+whilst continuing to resolve the query. Lookup timeout: the time at which the
resolver gives up trying to resolve a query. Retry count: the number of times
the resolver will retry a query to an upstream server if it gets a timeout.
</p><p>
The client and lookup timeouts require a bit more explanation. The
-resolution of the clent query might require a large number of queries to
+resolution of the client query might require a large number of queries to
upstream nameservers. Even if none of these queries timeout, the total time
taken to perform all the queries may exceed the client timeout. When this
happens, a SERVFAIL is returned to the client, but the resolver continues
-with the resolution process. Data received is added to the cache. However,
-there comes a time - the lookup timeout - when even the resolve gives up.
+with the resolution process; data received is added to the cache. However,
+there comes a time - the lookup timeout - when even the resolver gives up.
At this point it will wait for pending upstream queries to complete or
timeout and drop the query.
+</p></dd><dt><a name="RESOLVER_SET_QUERY_ACL"></a><span class="term">RESOLVER_SET_QUERY_ACL query ACL is configured</span></dt><dd><p>
+This debug message is generated when a new query ACL is configured for
+the resolver.
+</p></dd><dt><a name="RESOLVER_SET_ROOT_ADDRESS"></a><span class="term">RESOLVER_SET_ROOT_ADDRESS setting root address %1(%2)</span></dt><dd><p>
+This message gives the address of one of the root servers used by the
+resolver. It is output during startup and may appear multiple times,
+once for each root server address.
</p></dd><dt><a name="RESOLVER_SHUTDOWN"></a><span class="term">RESOLVER_SHUTDOWN resolver shutdown complete</span></dt><dd><p>
-This information message is output when the resolver has shut down.
+This informational message is output when the resolver has shut down.
</p></dd><dt><a name="RESOLVER_STARTED"></a><span class="term">RESOLVER_STARTED resolver started</span></dt><dd><p>
This informational message is output by the resolver when all initialization
has been completed and it is entering its main loop.
</p></dd><dt><a name="RESOLVER_STARTING"></a><span class="term">RESOLVER_STARTING starting resolver with command line '%1'</span></dt><dd><p>
An informational message, this is output when the resolver starts up.
-</p></dd><dt><a name="RESOLVER_UNEXRESP"></a><span class="term">RESOLVER_UNEXRESP received unexpected response, ignoring</span></dt><dd><p>
-A debug message noting that the server has received a response instead of a
-query and is ignoring it.
+</p></dd><dt><a name="RESOLVER_UNEXPECTED_RESPONSE"></a><span class="term">RESOLVER_UNEXPECTED_RESPONSE received unexpected response, ignoring</span></dt><dd><p>
+This is a debug message noting that the resolver received a DNS response
+packet on the port on which it is listening for queries. The packet
+has been ignored.
+</p></dd><dt><a name="RESOLVER_UNSUPPORTED_OPCODE"></a><span class="term">RESOLVER_UNSUPPORTED_OPCODE opcode %1 not supported by the resolver</span></dt><dd><p>
+This is a debug message output when the resolver received a message with an
+unsupported opcode (it can only process QUERY opcodes). It will return
+a message to the sender with the RCODE set to NOTIMP.
+</p></dd><dt><a name="SRVCOMM_ADDRESSES_NOT_LIST"></a><span class="term">SRVCOMM_ADDRESSES_NOT_LIST the address and port specification is not a list in %1</span></dt><dd><p>
+This points to an error in configuration. What was supposed to be a list of
+IP address - port pairs isn't a list at all but something else.
+</p></dd><dt><a name="SRVCOMM_ADDRESS_FAIL"></a><span class="term">SRVCOMM_ADDRESS_FAIL failed to listen on addresses (%1)</span></dt><dd><p>
+The server failed to bind to one of the address/port pair it should according
+to configuration, for reason listed in the message (usually because that pair
+is already used by other service or missing privileges). The server will try
+to recover and bind the address/port pairs it was listening to before (if any).
+</p></dd><dt><a name="SRVCOMM_ADDRESS_MISSING"></a><span class="term">SRVCOMM_ADDRESS_MISSING address specification is missing "address" or "port" element in %1</span></dt><dd><p>
+This points to an error in configuration. An address specification in the
+configuration is missing either an address or port and so cannot be used. The
+specification causing the error is given in the message.
+</p></dd><dt><a name="SRVCOMM_ADDRESS_TYPE"></a><span class="term">SRVCOMM_ADDRESS_TYPE address specification type is invalid in %1</span></dt><dd><p>
+This points to an error in configuration. An address specification in the
+configuration is malformed. The specification causing the error is given in the
+message. A valid specification contains an address part (which must be a string
+and must represent a valid IPv4 or IPv6 address) and port (which must be an
+integer in the range valid for TCP/UDP ports on your system).
+</p></dd><dt><a name="SRVCOMM_ADDRESS_UNRECOVERABLE"></a><span class="term">SRVCOMM_ADDRESS_UNRECOVERABLE failed to recover original addresses also (%2)</span></dt><dd><p>
+The recovery of old addresses after SRVCOMM_ADDRESS_FAIL also failed for
+the reason listed.
+</p><p>
+The condition indicates problems with the server and/or the system on
+which it is running. The server will continue running to allow
+reconfiguration, but will not be listening on any address or port until
+an administrator reconfigures it to do so.
+</p></dd><dt><a name="SRVCOMM_ADDRESS_VALUE"></a><span class="term">SRVCOMM_ADDRESS_VALUE address to set: %1#%2</span></dt><dd><p>
+Debug message. This lists one address and port value of the set of
+addresses we are going to listen on (eg. there will be one log message
+per pair). This appears only after SRVCOMM_SET_LISTEN, but might
+be hidden, as it has a higher debug level.
+</p></dd><dt><a name="SRVCOMM_KEYS_DEINIT"></a><span class="term">SRVCOMM_KEYS_DEINIT deinitializing TSIG keyring</span></dt><dd><p>
+Debug message indicating that the server is deinitializing the TSIG keyring.
+</p></dd><dt><a name="SRVCOMM_KEYS_INIT"></a><span class="term">SRVCOMM_KEYS_INIT initializing TSIG keyring</span></dt><dd><p>
+Debug message indicating that the server is initializing the global TSIG
+keyring. This should be seen only at server start.
+</p></dd><dt><a name="SRVCOMM_KEYS_UPDATE"></a><span class="term">SRVCOMM_KEYS_UPDATE updating TSIG keyring</span></dt><dd><p>
+Debug message indicating new keyring is being loaded from configuration (either
+on startup or as a result of configuration update).
+</p></dd><dt><a name="SRVCOMM_PORT_RANGE"></a><span class="term">SRVCOMM_PORT_RANGE port out of valid range (%1 in %2)</span></dt><dd><p>
+This points to an error in configuration. The port in an address
+specification is outside the valid range of 0 to 65535.
+</p></dd><dt><a name="SRVCOMM_SET_LISTEN"></a><span class="term">SRVCOMM_SET_LISTEN setting addresses to listen to</span></dt><dd><p>
+Debug message, noting that the server is about to start listening on a
+different set of IP addresses and ports than before.
+</p></dd><dt><a name="STATHTTPD_BAD_OPTION_VALUE"></a><span class="term">STATHTTPD_BAD_OPTION_VALUE bad command line argument: %1</span></dt><dd><p>
+The stats-httpd module was called with a bad command-line argument
+and will not start.
+</p></dd><dt><a name="STATHTTPD_CC_SESSION_ERROR"></a><span class="term">STATHTTPD_CC_SESSION_ERROR error connecting to message bus: %1</span></dt><dd><p>
+The stats-httpd module was unable to connect to the BIND 10 command
+and control bus. A likely problem is that the message bus daemon
+(b10-msgq) is not running. The stats-httpd module will now shut down.
+</p></dd><dt><a name="STATHTTPD_CLOSING"></a><span class="term">STATHTTPD_CLOSING closing %1#%2</span></dt><dd><p>
+The stats-httpd daemon will stop listening for requests on the given
+address and port number.
+</p></dd><dt><a name="STATHTTPD_CLOSING_CC_SESSION"></a><span class="term">STATHTTPD_CLOSING_CC_SESSION stopping cc session</span></dt><dd><p>
+Debug message indicating that the stats-httpd module is disconnecting
+from the command and control bus.
+</p></dd><dt><a name="STATHTTPD_HANDLE_CONFIG"></a><span class="term">STATHTTPD_HANDLE_CONFIG reading configuration: %1</span></dt><dd><p>
+The stats-httpd daemon has received new configuration data and will now
+process it. The (changed) data is printed.
+</p></dd><dt><a name="STATHTTPD_RECEIVED_SHUTDOWN_COMMAND"></a><span class="term">STATHTTPD_RECEIVED_SHUTDOWN_COMMAND shutdown command received</span></dt><dd><p>
+A shutdown command was sent to the stats-httpd module, and it will
+now shut down.
+</p></dd><dt><a name="STATHTTPD_RECEIVED_STATUS_COMMAND"></a><span class="term">STATHTTPD_RECEIVED_STATUS_COMMAND received command to return status</span></dt><dd><p>
+A status command was sent to the stats-httpd module, and it will
+respond with 'Stats Httpd is up.' and its PID.
+</p></dd><dt><a name="STATHTTPD_RECEIVED_UNKNOWN_COMMAND"></a><span class="term">STATHTTPD_RECEIVED_UNKNOWN_COMMAND received unknown command: %1</span></dt><dd><p>
+An unknown command has been sent to the stats-httpd module. The
+stats-httpd module will respond with an error, and the command will
+be ignored.
+</p></dd><dt><a name="STATHTTPD_SERVER_ERROR"></a><span class="term">STATHTTPD_SERVER_ERROR HTTP server error: %1</span></dt><dd><p>
+An internal error occurred while handling an HTTP request. An HTTP 500
+response will be sent back, and the specific error is printed. This
+is an error condition that likely points to a module that is not
+responding correctly to statistic requests.
+</p></dd><dt><a name="STATHTTPD_SERVER_INIT_ERROR"></a><span class="term">STATHTTPD_SERVER_INIT_ERROR HTTP server initialization error: %1</span></dt><dd><p>
+There was a problem initializing the HTTP server in the stats-httpd
+module upon receiving its configuration data. The most likely cause
+is a port binding problem or a bad configuration value. The specific
+error is printed in the message. The new configuration is ignored,
+and an error is sent back.
+</p></dd><dt><a name="STATHTTPD_SHUTDOWN"></a><span class="term">STATHTTPD_SHUTDOWN shutting down</span></dt><dd><p>
+The stats-httpd daemon is shutting down.
+</p></dd><dt><a name="STATHTTPD_STARTED"></a><span class="term">STATHTTPD_STARTED listening on %1#%2</span></dt><dd><p>
+The stats-httpd daemon will now start listening for requests on the
+given address and port number.
+</p></dd><dt><a name="STATHTTPD_STARTING_CC_SESSION"></a><span class="term">STATHTTPD_STARTING_CC_SESSION starting cc session</span></dt><dd><p>
+Debug message indicating that the stats-httpd module is connecting to
+the command and control bus.
+</p></dd><dt><a name="STATHTTPD_START_SERVER_INIT_ERROR"></a><span class="term">STATHTTPD_START_SERVER_INIT_ERROR HTTP server initialization error: %1</span></dt><dd><p>
+There was a problem initializing the HTTP server in the stats-httpd
+module upon startup. The most likely cause is that it was not able
+to bind to the listening port. The specific error is printed, and the
+module will shut down.
+</p></dd><dt><a name="STATHTTPD_STOPPED_BY_KEYBOARD"></a><span class="term">STATHTTPD_STOPPED_BY_KEYBOARD keyboard interrupt, shutting down</span></dt><dd><p>
+There was a keyboard interrupt signal to stop the stats-httpd
+daemon. The daemon will now shut down.
+</p></dd><dt><a name="STATHTTPD_UNKNOWN_CONFIG_ITEM"></a><span class="term">STATHTTPD_UNKNOWN_CONFIG_ITEM unknown configuration item: %1</span></dt><dd><p>
+The stats-httpd daemon received a configuration update from the
+configuration manager. However, one of the items in the
+configuration is unknown. The new configuration is ignored, and an
+error is sent back. A possible cause is that there was an upgrade
+problem, and the stats-httpd version is out of sync with the rest of
+the system.
+</p></dd><dt><a name="STATS_BAD_OPTION_VALUE"></a><span class="term">STATS_BAD_OPTION_VALUE bad command line argument: %1</span></dt><dd><p>
+The stats module was called with a bad command-line argument and will
+not start.
+</p></dd><dt><a name="STATS_CC_SESSION_ERROR"></a><span class="term">STATS_CC_SESSION_ERROR error connecting to message bus: %1</span></dt><dd><p>
+The stats module was unable to connect to the BIND 10 command and
+control bus. A likely problem is that the message bus daemon
+(b10-msgq) is not running. The stats module will now shut down.
+</p></dd><dt><a name="STATS_RECEIVED_NEW_CONFIG"></a><span class="term">STATS_RECEIVED_NEW_CONFIG received new configuration: %1</span></dt><dd><p>
+This debug message is printed when the stats module has received a
+configuration update from the configuration manager.
+</p></dd><dt><a name="STATS_RECEIVED_REMOVE_COMMAND"></a><span class="term">STATS_RECEIVED_REMOVE_COMMAND received command to remove %1</span></dt><dd><p>
+A remove command for the given name was sent to the stats module, and
+the given statistics value will now be removed. It will not appear in
+statistics reports until it appears in a statistics update from a
+module again.
+</p></dd><dt><a name="STATS_RECEIVED_RESET_COMMAND"></a><span class="term">STATS_RECEIVED_RESET_COMMAND received command to reset all statistics</span></dt><dd><p>
+The stats module received a command to clear all collected statistics.
+The data is cleared until it receives an update from the modules again.
+</p></dd><dt><a name="STATS_RECEIVED_SHOW_ALL_COMMAND"></a><span class="term">STATS_RECEIVED_SHOW_ALL_COMMAND received command to show all statistics</span></dt><dd><p>
+The stats module received a command to show all statistics that it has
+collected.
+</p></dd><dt><a name="STATS_RECEIVED_SHOW_NAME_COMMAND"></a><span class="term">STATS_RECEIVED_SHOW_NAME_COMMAND received command to show statistics for %1</span></dt><dd><p>
+The stats module received a command to show the statistics that it has
+collected for the given item.
+</p></dd><dt><a name="STATS_RECEIVED_SHUTDOWN_COMMAND"></a><span class="term">STATS_RECEIVED_SHUTDOWN_COMMAND shutdown command received</span></dt><dd><p>
+A shutdown command was sent to the stats module and it will now shut down.
+</p></dd><dt><a name="STATS_RECEIVED_STATUS_COMMAND"></a><span class="term">STATS_RECEIVED_STATUS_COMMAND received command to return status</span></dt><dd><p>
+A status command was sent to the stats module. It will return a
+response indicating that it is running normally.
+</p></dd><dt><a name="STATS_RECEIVED_UNKNOWN_COMMAND"></a><span class="term">STATS_RECEIVED_UNKNOWN_COMMAND received unknown command: %1</span></dt><dd><p>
+An unknown command has been sent to the stats module. The stats module
+will respond with an error and the command will be ignored.
+</p></dd><dt><a name="STATS_SEND_REQUEST_BOSS"></a><span class="term">STATS_SEND_REQUEST_BOSS requesting boss to send statistics</span></dt><dd><p>
+This debug message is printed when a request is sent to the boss module
+to send its data to the stats module.
+</p></dd><dt><a name="STATS_STOPPED_BY_KEYBOARD"></a><span class="term">STATS_STOPPED_BY_KEYBOARD keyboard interrupt, shutting down</span></dt><dd><p>
+There was a keyboard interrupt signal to stop the stats module. The
+daemon will now shut down.
+</p></dd><dt><a name="STATS_UNKNOWN_COMMAND_IN_SPEC"></a><span class="term">STATS_UNKNOWN_COMMAND_IN_SPEC unknown command in specification file: %1</span></dt><dd><p>
+The specification file for the stats module contains a command that
+is unknown in the implementation. The most likely cause is an
+installation problem, where the specification file stats.spec is
+from a different version of BIND 10 than the stats module itself.
+Please check your installation.
+</p></dd><dt><a name="XFRIN_AXFR_DATABASE_FAILURE"></a><span class="term">XFRIN_AXFR_DATABASE_FAILURE AXFR transfer of zone %1 failed: %2</span></dt><dd><p>
+The AXFR transfer for the given zone has failed due to a database problem.
+The error is shown in the log message.
+</p></dd><dt><a name="XFRIN_AXFR_INTERNAL_FAILURE"></a><span class="term">XFRIN_AXFR_INTERNAL_FAILURE AXFR transfer of zone %1 failed: %2</span></dt><dd><p>
+The AXFR transfer for the given zone has failed due to an internal
+problem in the bind10 python wrapper library.
+The error is shown in the log message.
+</p></dd><dt><a name="XFRIN_AXFR_TRANSFER_FAILURE"></a><span class="term">XFRIN_AXFR_TRANSFER_FAILURE AXFR transfer of zone %1 failed: %2</span></dt><dd><p>
+The AXFR transfer for the given zone has failed due to a protocol error.
+The error is shown in the log message.
+</p></dd><dt><a name="XFRIN_AXFR_TRANSFER_STARTED"></a><span class="term">XFRIN_AXFR_TRANSFER_STARTED AXFR transfer of zone %1 started</span></dt><dd><p>
+A connection to the master server has been made, the serial value in
+the SOA record has been checked, and a zone transfer has been started.
+</p></dd><dt><a name="XFRIN_AXFR_TRANSFER_SUCCESS"></a><span class="term">XFRIN_AXFR_TRANSFER_SUCCESS AXFR transfer of zone %1 succeeded</span></dt><dd><p>
+The AXFR transfer of the given zone was successfully completed.
+</p></dd><dt><a name="XFRIN_BAD_MASTER_ADDR_FORMAT"></a><span class="term">XFRIN_BAD_MASTER_ADDR_FORMAT bad format for master address: %1</span></dt><dd><p>
+The given master address is not a valid IP address.
+</p></dd><dt><a name="XFRIN_BAD_MASTER_PORT_FORMAT"></a><span class="term">XFRIN_BAD_MASTER_PORT_FORMAT bad format for master port: %1</span></dt><dd><p>
+The master port as read from the configuration is not a valid port number.
+</p></dd><dt><a name="XFRIN_BAD_TSIG_KEY_STRING"></a><span class="term">XFRIN_BAD_TSIG_KEY_STRING bad TSIG key string: %1</span></dt><dd><p>
+The TSIG key string as read from the configuration does not represent
+a valid TSIG key.
+</p></dd><dt><a name="XFRIN_BAD_ZONE_CLASS"></a><span class="term">XFRIN_BAD_ZONE_CLASS Invalid zone class: %1</span></dt><dd><p>
+The zone class as read from the configuration is not a valid DNS class.
+</p></dd><dt><a name="XFRIN_CC_SESSION_ERROR"></a><span class="term">XFRIN_CC_SESSION_ERROR error reading from cc channel: %1</span></dt><dd><p>
+There was a problem reading from the command and control channel. The
+most likely cause is that the msgq daemon is not running.
+</p></dd><dt><a name="XFRIN_COMMAND_ERROR"></a><span class="term">XFRIN_COMMAND_ERROR error while executing command '%1': %2</span></dt><dd><p>
+There was an error while the given command was being processed. The
+error is given in the log message.
+</p></dd><dt><a name="XFRIN_CONNECT_MASTER"></a><span class="term">XFRIN_CONNECT_MASTER error connecting to master at %1: %2</span></dt><dd><p>
+There was an error opening a connection to the master. The error is
+shown in the log message.
+</p></dd><dt><a name="XFRIN_IMPORT_DNS"></a><span class="term">XFRIN_IMPORT_DNS error importing python DNS module: %1</span></dt><dd><p>
+There was an error importing the python DNS module pydnspp. The most
+likely cause is a PYTHONPATH problem.
+</p></dd><dt><a name="XFRIN_MSGQ_SEND_ERROR"></a><span class="term">XFRIN_MSGQ_SEND_ERROR error while contacting %1 and %2</span></dt><dd><p>
+There was a problem sending a message to the xfrout module or the
+zone manager. This most likely means that the msgq daemon has quit or
+was killed.
+</p></dd><dt><a name="XFRIN_MSGQ_SEND_ERROR_ZONE_MANAGER"></a><span class="term">XFRIN_MSGQ_SEND_ERROR_ZONE_MANAGER error while contacting %1</span></dt><dd><p>
+There was a problem sending a message to the zone manager. This most
+likely means that the msgq daemon has quit or was killed.
+</p></dd><dt><a name="XFRIN_RETRANSFER_UNKNOWN_ZONE"></a><span class="term">XFRIN_RETRANSFER_UNKNOWN_ZONE got notification to retransfer unknown zone %1</span></dt><dd><p>
+There was an internal command to retransfer the given zone, but the
+zone is not known to the system. This may indicate that the configuration
+for xfrin is incomplete, or there was a typographical error in the
+zone name in the configuration.
+</p></dd><dt><a name="XFRIN_STARTING"></a><span class="term">XFRIN_STARTING starting resolver with command line '%1'</span></dt><dd><p>
+An informational message output when the xfrin daemon starts up.
+</p></dd><dt><a name="XFRIN_STOPPED_BY_KEYBOARD"></a><span class="term">XFRIN_STOPPED_BY_KEYBOARD keyboard interrupt, shutting down</span></dt><dd><p>
+There was a keyboard interrupt signal to stop the xfrin daemon. The
+daemon will now shut down.
+</p></dd><dt><a name="XFRIN_UNKNOWN_ERROR"></a><span class="term">XFRIN_UNKNOWN_ERROR unknown error: %1</span></dt><dd><p>
+An uncaught exception was raised while running the xfrin daemon. The
+exception message is printed in the log message.
+</p></dd><dt><a name="XFROUT_AXFR_TRANSFER_DONE"></a><span class="term">XFROUT_AXFR_TRANSFER_DONE transfer of %1/%2 complete</span></dt><dd><p>
+The transfer of the given zone has been completed successfully, or was
+aborted due to a shutdown event.
+</p></dd><dt><a name="XFROUT_AXFR_TRANSFER_ERROR"></a><span class="term">XFROUT_AXFR_TRANSFER_ERROR error transferring zone %1/%2: %3</span></dt><dd><p>
+An uncaught exception was encountered while sending the response to
+an AXFR query. The error message of the exception is included in the
+log message, but this error most likely points to incomplete exception
+handling in the code.
+</p></dd><dt><a name="XFROUT_AXFR_TRANSFER_FAILED"></a><span class="term">XFROUT_AXFR_TRANSFER_FAILED transfer of %1/%2 failed, rcode: %3</span></dt><dd><p>
+A transfer out for the given zone failed. An error response is sent
+to the client. The given rcode is the rcode that is set in the error
+response. This is either NOTAUTH (we are not authoritative for the
+zone), SERVFAIL (our internal database is missing the SOA record for
+the zone), or REFUSED (the limit of simultaneous outgoing AXFR
+transfers, as specified by the configuration value
+Xfrout/max_transfers_out, has been reached).
+</p></dd><dt><a name="XFROUT_AXFR_TRANSFER_STARTED"></a><span class="term">XFROUT_AXFR_TRANSFER_STARTED transfer of zone %1/%2 has started</span></dt><dd><p>
+A transfer out of the given zone has started.
+</p></dd><dt><a name="XFROUT_BAD_TSIG_KEY_STRING"></a><span class="term">XFROUT_BAD_TSIG_KEY_STRING bad TSIG key string: %1</span></dt><dd><p>
+The TSIG key string as read from the configuration does not represent
+a valid TSIG key.
+</p></dd><dt><a name="XFROUT_CC_SESSION_ERROR"></a><span class="term">XFROUT_CC_SESSION_ERROR error reading from cc channel: %1</span></dt><dd><p>
+There was a problem reading from the command and control channel. The
+most likely cause is that the msgq daemon is not running.
+</p></dd><dt><a name="XFROUT_CC_SESSION_TIMEOUT_ERROR"></a><span class="term">XFROUT_CC_SESSION_TIMEOUT_ERROR timeout waiting for cc response</span></dt><dd><p>
+There was a problem reading a response from another module over the
+command and control channel. The most likely cause is that the
+configuration manager b10-cfgmgr is not running.
+</p></dd><dt><a name="XFROUT_FETCH_REQUEST_ERROR"></a><span class="term">XFROUT_FETCH_REQUEST_ERROR socket error while fetching a request from the auth daemon</span></dt><dd><p>
+There was a socket error while contacting the b10-auth daemon to
+fetch a transfer request. The auth daemon may have shut down.
+</p></dd><dt><a name="XFROUT_HANDLE_QUERY_ERROR"></a><span class="term">XFROUT_HANDLE_QUERY_ERROR error while handling query: %1</span></dt><dd><p>
+There was a general error handling an xfrout query. The error is shown
+in the message. In principle this error should not appear, and points
+to an oversight in catching exceptions in the right place. However, to
+ensure the daemon keeps running, this error is caught and reported.
+</p></dd><dt><a name="XFROUT_IMPORT"></a><span class="term">XFROUT_IMPORT error importing python module: %1</span></dt><dd><p>
+There was an error importing a python module. One of the modules needed
+by xfrout could not be found. This suggests that either some libraries
+are missing on the system, or the PYTHONPATH variable is not correct.
+The specific place where this library needs to be depends on your
+system and your specific installation.
+</p></dd><dt><a name="XFROUT_NEW_CONFIG"></a><span class="term">XFROUT_NEW_CONFIG Update xfrout configuration</span></dt><dd><p>
+New configuration settings have been sent from the configuration
+manager. The xfrout daemon will now apply them.
+</p></dd><dt><a name="XFROUT_NEW_CONFIG_DONE"></a><span class="term">XFROUT_NEW_CONFIG_DONE Update xfrout configuration done</span></dt><dd><p>
+The xfrout daemon is now done reading the new configuration settings
+received from the configuration manager.
+</p></dd><dt><a name="XFROUT_NOTIFY_COMMAND"></a><span class="term">XFROUT_NOTIFY_COMMAND received command to send notifies for %1/%2</span></dt><dd><p>
+The xfrout daemon received a command on the command channel that
+NOTIFY packets should be sent for the given zone.
+</p></dd><dt><a name="XFROUT_PARSE_QUERY_ERROR"></a><span class="term">XFROUT_PARSE_QUERY_ERROR error parsing query: %1</span></dt><dd><p>
+There was a parse error while reading an incoming query. The parse
+error is shown in the log message. A remote client sent a packet we
+do not understand or support. The xfrout request will be ignored.
+In general, this should only occur for unexpected problems like
+memory allocation failures, as the query should already have been
+parsed by the b10-auth daemon, before it was passed here.
+</p></dd><dt><a name="XFROUT_PROCESS_REQUEST_ERROR"></a><span class="term">XFROUT_PROCESS_REQUEST_ERROR error processing transfer request: %2</span></dt><dd><p>
+There was an error processing a transfer request. The error is included
+in the log message, but at this point no specific information other
+than that could be given. This points to incomplete exception handling
+in the code.
+</p></dd><dt><a name="XFROUT_QUERY_DROPPED"></a><span class="term">XFROUT_QUERY_DROPPED request to transfer %1/%2 to [%3]:%4 dropped</span></dt><dd><p>
+The xfrout process silently dropped a request to transfer the zone to the
+given host, as required by the ACLs. The %1 and %2 represent the zone name
+and class, while %3 and %4 represent the IP address and port of the peer
+requesting the transfer.
+</p></dd><dt><a name="XFROUT_QUERY_REJECTED"></a><span class="term">XFROUT_QUERY_REJECTED request to transfer %1/%2 to [%3]:%4 rejected</span></dt><dd><p>
+The xfrout process rejected (with a REFUSED rcode) a request to transfer the
+zone to the given host because of the ACLs. The %1 and %2 represent the zone
+name and class, while %3 and %4 represent the IP address and port of the peer
+requesting the transfer.
+</p></dd><dt><a name="XFROUT_RECEIVED_SHUTDOWN_COMMAND"></a><span class="term">XFROUT_RECEIVED_SHUTDOWN_COMMAND shutdown command received</span></dt><dd><p>
+The xfrout daemon received a shutdown command from the command channel
+and will now shut down.
+</p></dd><dt><a name="XFROUT_RECEIVE_FILE_DESCRIPTOR_ERROR"></a><span class="term">XFROUT_RECEIVE_FILE_DESCRIPTOR_ERROR error receiving the file descriptor for an XFR connection</span></dt><dd><p>
+There was an error receiving the file descriptor for the transfer
+request. Normally, the request is received by b10-auth, and passed on
+to the xfrout daemon, so it can answer directly. However, there was a
+problem receiving this file descriptor. The request will be ignored.
+</p></dd><dt><a name="XFROUT_REMOVE_OLD_UNIX_SOCKET_FILE_ERROR"></a><span class="term">XFROUT_REMOVE_OLD_UNIX_SOCKET_FILE_ERROR error removing unix socket file %1: %2</span></dt><dd><p>
+The unix socket file xfrout needs for contact with the auth daemon
+already exists, and needs to be removed first, but there is a problem
+removing it. It is likely that we do not have permission to remove
+this file. The specific error is shown in the log message. The xfrout
+daemon will shut down.
+</p></dd><dt><a name="XFROUT_REMOVE_UNIX_SOCKET_FILE_ERROR"></a><span class="term">XFROUT_REMOVE_UNIX_SOCKET_FILE_ERROR error clearing unix socket file %1: %2</span></dt><dd><p>
+When shutting down, the xfrout daemon tried to clear the unix socket
+file used for communication with the auth daemon. It failed to remove
+the file. The reason for the failure is given in the error message.
+</p></dd><dt><a name="XFROUT_SOCKET_SELECT_ERROR"></a><span class="term">XFROUT_SOCKET_SELECT_ERROR error while calling select() on request socket: %1</span></dt><dd><p>
+There was an error while calling select() on the socket that informs
+the xfrout daemon that a new xfrout request has arrived. This should
+be a result of rare local error such as memory allocation failure and
+shouldn't happen under normal conditions. The error is included in the
+log message.
+</p></dd><dt><a name="XFROUT_STOPPED_BY_KEYBOARD"></a><span class="term">XFROUT_STOPPED_BY_KEYBOARD keyboard interrupt, shutting down</span></dt><dd><p>
+There was a keyboard interrupt signal to stop the xfrout daemon. The
+daemon will now shut down.
+</p></dd><dt><a name="XFROUT_STOPPING"></a><span class="term">XFROUT_STOPPING the xfrout daemon is shutting down</span></dt><dd><p>
+The current transfer is aborted, as the xfrout daemon is shutting down.
+</p></dd><dt><a name="XFROUT_UNIX_SOCKET_FILE_IN_USE"></a><span class="term">XFROUT_UNIX_SOCKET_FILE_IN_USE another xfrout process seems to be using the unix socket file %1</span></dt><dd><p>
+While starting up, the xfrout daemon tried to clear the unix domain
+socket needed for contacting the b10-auth daemon to pass requests
+on, but the file is in use. The most likely cause is that another
+xfrout daemon process is still running. This xfrout daemon (the one
+printing this message) will not start.
+</p></dd><dt><a name="ZONEMGR_CCSESSION_ERROR"></a><span class="term">ZONEMGR_CCSESSION_ERROR command channel session error: %1</span></dt><dd><p>
+An error was encountered on the command channel. The message indicates
+the nature of the error.
+</p></dd><dt><a name="ZONEMGR_JITTER_TOO_BIG"></a><span class="term">ZONEMGR_JITTER_TOO_BIG refresh_jitter is too big, setting to 0.5</span></dt><dd><p>
+The value specified in the configuration for the refresh jitter is too large
+so its value has been set to the maximum of 0.5.
+</p></dd><dt><a name="ZONEMGR_KEYBOARD_INTERRUPT"></a><span class="term">ZONEMGR_KEYBOARD_INTERRUPT exiting zonemgr process as result of keyboard interrupt</span></dt><dd><p>
+An informational message output when the zone manager was being run at a
+terminal and it was terminated via a keyboard interrupt signal.
+</p></dd><dt><a name="ZONEMGR_LOAD_ZONE"></a><span class="term">ZONEMGR_LOAD_ZONE loading zone %1 (class %2)</span></dt><dd><p>
+This is a debug message indicating that the zone of the specified class
+is being loaded.
+</p></dd><dt><a name="ZONEMGR_NO_MASTER_ADDRESS"></a><span class="term">ZONEMGR_NO_MASTER_ADDRESS internal BIND 10 command did not contain address of master</span></dt><dd><p>
+A command received by the zone manager from the Auth module did not
+contain the address of the master server from which a NOTIFY message
+was received. This may be due to an internal programming error; please
+submit a bug report.
+</p></dd><dt><a name="ZONEMGR_NO_SOA"></a><span class="term">ZONEMGR_NO_SOA zone %1 (class %2) does not have an SOA record</span></dt><dd><p>
+When loading the named zone of the specified class the zone manager
+discovered that the data did not contain an SOA record. The load has
+been abandoned.
+</p></dd><dt><a name="ZONEMGR_NO_TIMER_THREAD"></a><span class="term">ZONEMGR_NO_TIMER_THREAD trying to stop zone timer thread but it is not running</span></dt><dd><p>
+An attempt was made to stop the timer thread (used to track when zones
+should be refreshed) but it was not running. This may indicate an
+internal program error. Please submit a bug report.
+</p></dd><dt><a name="ZONEMGR_NO_ZONE_CLASS"></a><span class="term">ZONEMGR_NO_ZONE_CLASS internal BIND 10 command did not contain class of zone</span></dt><dd><p>
+A command received by the zone manager from another BIND 10 module did
+not contain the class of the zone on which the zone manager should act.
+This may be due to an internal programming error; please submit a
+bug report.
+</p></dd><dt><a name="ZONEMGR_NO_ZONE_NAME"></a><span class="term">ZONEMGR_NO_ZONE_NAME internal BIND 10 command did not contain name of zone</span></dt><dd><p>
+A command received by the zone manager from another BIND 10 module did
+not contain the name of the zone on which the zone manager should act.
+This may be due to an internal programming error; please submit a
+bug report.
+</p></dd><dt><a name="ZONEMGR_RECEIVE_NOTIFY"></a><span class="term">ZONEMGR_RECEIVE_NOTIFY received NOTIFY command for zone %1 (class %2)</span></dt><dd><p>
+This is a debug message indicating that the zone manager has received a
+NOTIFY command over the command channel. The command is sent by the Auth
+process when it is acting as a slave server for the zone and causes the
+zone manager to record the master server for the zone and start a timer;
+when the timer expires, the master will be polled to see if it contains
+new data.
+</p></dd><dt><a name="ZONEMGR_RECEIVE_SHUTDOWN"></a><span class="term">ZONEMGR_RECEIVE_SHUTDOWN received SHUTDOWN command</span></dt><dd><p>
+This is a debug message indicating that the zone manager has received
+a SHUTDOWN command over the command channel from the Boss process.
+It will act on this command and shut down.
+</p></dd><dt><a name="ZONEMGR_RECEIVE_UNKNOWN"></a><span class="term">ZONEMGR_RECEIVE_UNKNOWN received unknown command '%1'</span></dt><dd><p>
+This is a warning message indicating that the zone manager has received
+the stated command over the command channel. The command is not known
+to the zone manager and although the command is ignored, its receipt
+may indicate an internal error. Please submit a bug report.
+</p></dd><dt><a name="ZONEMGR_RECEIVE_XFRIN_FAILED"></a><span class="term">ZONEMGR_RECEIVE_XFRIN_FAILED received XFRIN FAILED command for zone %1 (class %2)</span></dt><dd><p>
+This is a debug message indicating that the zone manager has received
+an XFRIN FAILED command over the command channel. The command is sent
+by the Xfrin process when a transfer of zone data into the system has
+failed, and causes the zone manager to schedule another transfer attempt.
+</p></dd><dt><a name="ZONEMGR_RECEIVE_XFRIN_SUCCESS"></a><span class="term">ZONEMGR_RECEIVE_XFRIN_SUCCESS received XFRIN SUCCESS command for zone %1 (class %2)</span></dt><dd><p>
+This is a debug message indicating that the zone manager has received
+an XFRIN SUCCESS command over the command channel. The command is sent
+by the Xfrin process when the transfer of zone data into the system has
+succeeded, and causes the data to be loaded and served by BIND 10.
+</p></dd><dt><a name="ZONEMGR_REFRESH_ZONE"></a><span class="term">ZONEMGR_REFRESH_ZONE refreshing zone %1 (class %2)</span></dt><dd><p>
+The zone manager is refreshing the named zone of the specified class
+with updated information.
+</p></dd><dt><a name="ZONEMGR_SELECT_ERROR"></a><span class="term">ZONEMGR_SELECT_ERROR error with select(): %1</span></dt><dd><p>
+An attempt to wait for input from a socket failed. The failing operation
+is a call to the operating system's select() function, which failed for
+the given reason.
+</p></dd><dt><a name="ZONEMGR_SEND_FAIL"></a><span class="term">ZONEMGR_SEND_FAIL failed to send command to %1, session has been closed</span></dt><dd><p>
+The zone manager attempted to send a command to the named BIND 10 module,
+but the send failed. The session between the modules has been closed.
+</p></dd><dt><a name="ZONEMGR_SESSION_ERROR"></a><span class="term">ZONEMGR_SESSION_ERROR unable to establish session to command channel daemon</span></dt><dd><p>
+The zonemgr process was not able to be started because it could not
+connect to the command channel daemon. The most usual cause of this
+problem is that the daemon is not running.
+</p></dd><dt><a name="ZONEMGR_SESSION_TIMEOUT"></a><span class="term">ZONEMGR_SESSION_TIMEOUT timeout on session to command channel daemon</span></dt><dd><p>
+The zonemgr process was not able to be started because it timed out when
+connecting to the command channel daemon. The most usual cause of this
+problem is that the daemon is not running.
+</p></dd><dt><a name="ZONEMGR_SHUTDOWN"></a><span class="term">ZONEMGR_SHUTDOWN zone manager has shut down</span></dt><dd><p>
+A debug message, output when the zone manager has shut down completely.
+</p></dd><dt><a name="ZONEMGR_STARTING"></a><span class="term">ZONEMGR_STARTING zone manager starting</span></dt><dd><p>
+A debug message output when the zone manager starts up.
+</p></dd><dt><a name="ZONEMGR_TIMER_THREAD_RUNNING"></a><span class="term">ZONEMGR_TIMER_THREAD_RUNNING trying to start timer thread but one is already running</span></dt><dd><p>
+This message is issued when an attempt is made to start the timer
+thread (which keeps track of when zones need a refresh) but one is
+already running. It indicates either an error in the program logic or
+a problem with stopping a previous instance of the timer. Please submit
+a bug report.
+</p></dd><dt><a name="ZONEMGR_UNKNOWN_ZONE_FAIL"></a><span class="term">ZONEMGR_UNKNOWN_ZONE_FAIL zone %1 (class %2) is not known to the zone manager</span></dt><dd><p>
+An XFRIN operation has failed but the zone that was the subject of the
+operation is not being managed by the zone manager. This may indicate
+an error in the program (as the operation should not have been initiated
+if this were the case). Please submit a bug report.
+</p></dd><dt><a name="ZONEMGR_UNKNOWN_ZONE_NOTIFIED"></a><span class="term">ZONEMGR_UNKNOWN_ZONE_NOTIFIED notified zone %1 (class %2) is not known to the zone manager</span></dt><dd><p>
+A NOTIFY was received but the zone that was the subject of the operation
+is not being managed by the zone manager. This may indicate an error
+in the program (as the operation should not have been initiated if this
+were the case). Please submit a bug report.
+</p></dd><dt><a name="ZONEMGR_UNKNOWN_ZONE_SUCCESS"></a><span class="term">ZONEMGR_UNKNOWN_ZONE_SUCCESS zone %1 (class %2) is not known to the zone manager</span></dt><dd><p>
+An XFRIN operation has succeeded but the zone received is not being
+managed by the zone manager. This may indicate an error in the program
+(as the operation should not have been initiated if this were the case).
+Please submit a bug report.
</p></dd></dl></div><p>
</p></div></div></body></html>
diff --git a/doc/guide/bind10-messages.xml b/doc/guide/bind10-messages.xml
index eaa8bb9..f5c44b3 100644
--- a/doc/guide/bind10-messages.xml
+++ b/doc/guide/bind10-messages.xml
@@ -5,6 +5,12 @@
<!ENTITY % version SYSTEM "version.ent">
%version;
]>
+<!--
+ This XML document is generated using the system_messages.py tool
+ based on the .mes message files.
+
+ Do not edit this file.
+-->
<book>
<?xml-stylesheet href="bind10-guide.css" type="text/css"?>
@@ -62,16 +68,16 @@
<para>
<variablelist>
-<varlistentry id="ASIODNS_FETCHCOMP">
-<term>ASIODNS_FETCHCOMP upstream fetch to %1(%2) has now completed</term>
+<varlistentry id="ASIODNS_FETCH_COMPLETED">
+<term>ASIODNS_FETCH_COMPLETED upstream fetch to %1(%2) has now completed</term>
<listitem><para>
-A debug message, this records the the upstream fetch (a query made by the
+A debug message, this records that the upstream fetch (a query made by the
resolver on behalf of its client) to the specified address has completed.
</para></listitem>
</varlistentry>
-<varlistentry id="ASIODNS_FETCHSTOP">
-<term>ASIODNS_FETCHSTOP upstream fetch to %1(%2) has been stopped</term>
+<varlistentry id="ASIODNS_FETCH_STOPPED">
+<term>ASIODNS_FETCH_STOPPED upstream fetch to %1(%2) has been stopped</term>
<listitem><para>
An external component has requested the halting of an upstream fetch. This
is an allowed operation, and the message should only appear if debug is
@@ -79,27 +85,27 @@ enabled.
</para></listitem>
</varlistentry>
-<varlistentry id="ASIODNS_OPENSOCK">
-<term>ASIODNS_OPENSOCK error %1 opening %2 socket to %3(%4)</term>
+<varlistentry id="ASIODNS_OPEN_SOCKET">
+<term>ASIODNS_OPEN_SOCKET error %1 opening %2 socket to %3(%4)</term>
<listitem><para>
The asynchronous I/O code encountered an error when trying to open a socket
of the specified protocol in order to send a message to the target address.
-The the number of the system error that cause the problem is given in the
+The number of the system error that caused the problem is given in the
message.
</para></listitem>
</varlistentry>
-<varlistentry id="ASIODNS_RECVSOCK">
-<term>ASIODNS_RECVSOCK error %1 reading %2 data from %3(%4)</term>
+<varlistentry id="ASIODNS_READ_DATA">
+<term>ASIODNS_READ_DATA error %1 reading %2 data from %3(%4)</term>
<listitem><para>
-The asynchronous I/O code encountered an error when trying read data from
-the specified address on the given protocol. The the number of the system
-error that cause the problem is given in the message.
+The asynchronous I/O code encountered an error when trying to read data from
+the specified address on the given protocol. The number of the system
+error that caused the problem is given in the message.
</para></listitem>
</varlistentry>
-<varlistentry id="ASIODNS_RECVTMO">
-<term>ASIODNS_RECVTMO receive timeout while waiting for data from %1(%2)</term>
+<varlistentry id="ASIODNS_READ_TIMEOUT">
+<term>ASIODNS_READ_TIMEOUT receive timeout while waiting for data from %1(%2)</term>
<listitem><para>
An upstream fetch from the specified address timed out. This may happen for
any number of reasons and is most probably a problem at the remote server
@@ -108,29 +114,1436 @@ enabled.
</para></listitem>
</varlistentry>
-<varlistentry id="ASIODNS_SENDSOCK">
-<term>ASIODNS_SENDSOCK error %1 sending data using %2 to %3(%4)</term>
+<varlistentry id="ASIODNS_SEND_DATA">
+<term>ASIODNS_SEND_DATA error %1 sending data using %2 to %3(%4)</term>
+<listitem><para>
+The asynchronous I/O code encountered an error when trying to send data to
+the specified address on the given protocol. The number of the system
+error that caused the problem is given in the message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ASIODNS_UNKNOWN_ORIGIN">
+<term>ASIODNS_UNKNOWN_ORIGIN unknown origin for ASIO error code %1 (protocol: %2, address %3)</term>
+<listitem><para>
+An internal consistency check on the origin of a message from the
+asynchronous I/O module failed. This may indicate an internal error;
+please submit a bug report.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ASIODNS_UNKNOWN_RESULT">
+<term>ASIODNS_UNKNOWN_RESULT unknown result (%1) when IOFetch::stop() was executed for I/O to %2(%3)</term>
+<listitem><para>
+An internal error indicating that the termination method of the resolver's
+upstream fetch class was called with an unknown result code (which is
+given in the message). Please submit a bug report.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_AXFR_ERROR">
+<term>AUTH_AXFR_ERROR error handling AXFR request: %1</term>
+<listitem><para>
+This is a debug message produced by the authoritative server when it
+has encountered an error processing an AXFR request. The message gives
+the reason for the error, and the server will return a SERVFAIL code to
+the sender.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_AXFR_UDP">
+<term>AUTH_AXFR_UDP AXFR query received over UDP</term>
+<listitem><para>
+This is a debug message output when the authoritative server has received
+an AXFR query over UDP. Use of UDP for AXFRs is not permitted by the
+protocol, so the server will return a FORMERR error to the sender.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_COMMAND_FAILED">
+<term>AUTH_COMMAND_FAILED execution of command channel instruction '%1' failed: %2</term>
+<listitem><para>
+Execution of the specified command by the authoritative server failed. The
+message contains the reason for the failure.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_CONFIG_CHANNEL_CREATED">
+<term>AUTH_CONFIG_CHANNEL_CREATED configuration session channel created</term>
+<listitem><para>
+This is a debug message indicating that the authoritative server has created
+the channel to the configuration manager. It is issued during server
+startup as an indication that the initialization is proceeding normally.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_CONFIG_CHANNEL_ESTABLISHED">
+<term>AUTH_CONFIG_CHANNEL_ESTABLISHED configuration session channel established</term>
+<listitem><para>
+This is a debug message indicating that the authoritative server
+has established communication with the configuration manager over the
+previously-created channel. It is issued during server startup as an
+indication that the initialization is proceeding normally.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_CONFIG_CHANNEL_STARTED">
+<term>AUTH_CONFIG_CHANNEL_STARTED configuration session channel started</term>
+<listitem><para>
+This is a debug message, issued when the authoritative server has
+posted a request to be notified when new configuration information is
+available. It is issued during server startup as an indication that
+the initialization is proceeding normally.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_CONFIG_LOAD_FAIL">
+<term>AUTH_CONFIG_LOAD_FAIL load of configuration failed: %1</term>
+<listitem><para>
+An attempt to configure the server with information from the configuration
+database during the startup sequence has failed. (The reason for
+the failure is given in the message.) The server will continue its
+initialization although it may not be configured in the desired way.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_CONFIG_UPDATE_FAIL">
+<term>AUTH_CONFIG_UPDATE_FAIL update of configuration failed: %1</term>
+<listitem><para>
+An attempt to update the configuration of the server with information
+from the configuration database has failed, the reason being given in
+the message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_DATA_SOURCE">
+<term>AUTH_DATA_SOURCE data source database file: %1</term>
+<listitem><para>
+This is a debug message produced by the authoritative server when it accesses a
+database data source, listing the file that is being accessed.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_DNS_SERVICES_CREATED">
+<term>AUTH_DNS_SERVICES_CREATED DNS services created</term>
+<listitem><para>
+This is a debug message indicating that the component that will be handling
+incoming queries for the authoritative server (DNSServices) has been
+successfully created. It is issued during server startup as an indication
+that the initialization is proceeding normally.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_HEADER_PARSE_FAIL">
+<term>AUTH_HEADER_PARSE_FAIL unable to parse header in received DNS packet: %1</term>
+<listitem><para>
+This is a debug message, generated by the authoritative server when an
+attempt to parse the header of a received DNS packet has failed. (The
+reason for the failure is given in the message.) The server will drop the
+packet.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_LOAD_TSIG">
+<term>AUTH_LOAD_TSIG loading TSIG keys</term>
+<listitem><para>
+This is a debug message indicating that the authoritative server
+has requested the keyring holding TSIG keys from the configuration
+database. It is issued during server startup as an indication that the
+initialization is proceeding normally.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_LOAD_ZONE">
+<term>AUTH_LOAD_ZONE loaded zone %1/%2</term>
+<listitem><para>
+This debug message is issued during the processing of the 'loadzone' command
+when the authoritative server has successfully loaded the named zone of the
+named class.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_MEM_DATASRC_DISABLED">
+<term>AUTH_MEM_DATASRC_DISABLED memory data source is disabled for class %1</term>
+<listitem><para>
+This is a debug message reporting that the authoritative server has
+discovered that the memory data source is disabled for the given class.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_MEM_DATASRC_ENABLED">
+<term>AUTH_MEM_DATASRC_ENABLED memory data source is enabled for class %1</term>
+<listitem><para>
+This is a debug message reporting that the authoritative server has
+discovered that the memory data source is enabled for the given class.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_NOTIFY_QUESTIONS">
+<term>AUTH_NOTIFY_QUESTIONS invalid number of questions (%1) in incoming NOTIFY</term>
+<listitem><para>
+This debug message is logged by the authoritative server when it receives
+a NOTIFY packet that contains zero or more than one question. (A valid
+NOTIFY packet contains one question.) The server will return a FORMERR
+error to the sender.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_NOTIFY_RRTYPE">
+<term>AUTH_NOTIFY_RRTYPE invalid question RR type (%1) in incoming NOTIFY</term>
+<listitem><para>
+This debug message is logged by the authoritative server when it receives
+a NOTIFY packet that has an RR type of something other than SOA in the
+question section. (The RR type received is included in the message.) The
+server will return a FORMERR error to the sender.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_NO_STATS_SESSION">
+<term>AUTH_NO_STATS_SESSION session interface for statistics is not available</term>
+<listitem><para>
+The authoritative server had no session with the statistics module at the
+time it attempted to send it data: the attempt has been abandoned. This
+could be an error in configuration.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_NO_XFRIN">
+<term>AUTH_NO_XFRIN received NOTIFY but XFRIN session is not running</term>
+<listitem><para>
+This is a debug message produced by the authoritative server when it receives
+a NOTIFY packet but the XFRIN process is not running. The packet will be
+dropped and nothing returned to the sender.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_PACKET_PARSE_ERROR">
+<term>AUTH_PACKET_PARSE_ERROR unable to parse received DNS packet: %1</term>
+<listitem><para>
+This is a debug message, generated by the authoritative server when an
+attempt to parse a received DNS packet has failed due to something other
+than a protocol error. The reason for the failure is given in the message;
+the server will return a SERVFAIL error code to the sender.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_PACKET_PROTOCOL_ERROR">
+<term>AUTH_PACKET_PROTOCOL_ERROR DNS packet protocol error: %1. Returning %2</term>
+<listitem><para>
+This is a debug message, generated by the authoritative server when an
+attempt to parse a received DNS packet has failed due to a protocol error.
+The reason for the failure is given in the message, as is the error code
+that will be returned to the sender.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_PACKET_RECEIVED">
+<term>AUTH_PACKET_RECEIVED message received:\n%1</term>
+<listitem><para>
+This is a debug message output by the authoritative server when it
+receives a valid DNS packet.
+</para><para>
+Note: This message includes the packet received, rendered in the form of
+multiple lines of text. For this reason, it is suggested that this log message
+not be routed to the syslog file, where the multiple lines could confuse
+programs that expect a format of one message per line.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_PROCESS_FAIL">
+<term>AUTH_PROCESS_FAIL message processing failure: %1</term>
+<listitem><para>
+This message is generated by the authoritative server when it has
+encountered an internal error whilst processing a received packet:
+the cause of the error is included in the message.
+</para><para>
+The server will return a SERVFAIL error code to the sender of the packet.
+This message indicates a potential error in the server. Please open a
+bug ticket for this issue.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_RECEIVED_COMMAND">
+<term>AUTH_RECEIVED_COMMAND command '%1' received</term>
+<listitem><para>
+This is a debug message issued when the authoritative server has received
+a command on the command channel.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_RECEIVED_SENDSTATS">
+<term>AUTH_RECEIVED_SENDSTATS command 'sendstats' received</term>
+<listitem><para>
+This is a debug message issued when the authoritative server has received
+a command from the statistics module to send it data. The 'sendstats'
+command is handled differently to other commands, which is why the debug
+message associated with it has its own code.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_RESPONSE_RECEIVED">
+<term>AUTH_RESPONSE_RECEIVED received response message, ignoring</term>
+<listitem><para>
+This is a debug message, this is output if the authoritative server
+receives a DNS packet with the QR bit set, i.e. a DNS response. The
+server ignores the packet as it only responds to question packets.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_SEND_ERROR_RESPONSE">
+<term>AUTH_SEND_ERROR_RESPONSE sending an error response (%1 bytes):\n%2</term>
+<listitem><para>
+This is a debug message recording that the authoritative server is sending
+an error response to the originator of the query. A previous message will
+have recorded details of the failure.
+</para><para>
+Note: This message includes the packet sent, rendered in the form of
+multiple lines of text. For this reason, it is suggested that this log message
+not be routed to the syslog file, where the multiple lines could confuse
+programs that expect a format of one message per line.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_SEND_NORMAL_RESPONSE">
+<term>AUTH_SEND_NORMAL_RESPONSE sending a normal response (%1 bytes):\n%2</term>
+<listitem><para>
+This is a debug message recording that the authoritative server is sending
+a response to the originator of a query.
+</para><para>
+Note: This message includes the packet sent, rendered in the form of
+multiple lines of text. For this reason, it is suggested that this log message
+not be routed to the syslog file, where the multiple lines could confuse
+programs that expect a format of one message per line.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_SERVER_CREATED">
+<term>AUTH_SERVER_CREATED server created</term>
+<listitem><para>
+An informational message indicating that the authoritative server process has
+been created and is initializing. The AUTH_SERVER_STARTED message will be
+output when initialization has successfully completed and the server starts
+accepting queries.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_SERVER_FAILED">
+<term>AUTH_SERVER_FAILED server failed: %1</term>
+<listitem><para>
+The authoritative server has encountered a fatal error and is terminating. The
+reason for the failure is included in the message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_SERVER_STARTED">
+<term>AUTH_SERVER_STARTED server started</term>
+<listitem><para>
+Initialization of the authoritative server has completed successfully
+and it is entering the main loop, waiting for queries to arrive.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_SQLITE3">
+<term>AUTH_SQLITE3 nothing to do for loading sqlite3</term>
+<listitem><para>
+This is a debug message indicating that the authoritative server has
+found that the data source it is loading is an SQLite3 data source,
+so no further validation is needed.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_STATS_CHANNEL_CREATED">
+<term>AUTH_STATS_CHANNEL_CREATED STATS session channel created</term>
+<listitem><para>
+This is a debug message indicating that the authoritative server has
+created a channel to the statistics process. It is issued during server
+startup as an indication that the initialization is proceeding normally.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_STATS_CHANNEL_ESTABLISHED">
+<term>AUTH_STATS_CHANNEL_ESTABLISHED STATS session channel established</term>
+<listitem><para>
+This is a debug message indicating that the authoritative server
+has established communication over the previously created statistics
+channel. It is issued during server startup as an indication that the
+initialization is proceeding normally.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_STATS_COMMS">
+<term>AUTH_STATS_COMMS communication error in sending statistics data: %1</term>
+<listitem><para>
+An error was encountered when the authoritative server tried to send data
+to the statistics daemon. The message includes additional information
+describing the reason for the failure.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_STATS_TIMEOUT">
+<term>AUTH_STATS_TIMEOUT timeout while sending statistics data: %1</term>
+<listitem><para>
+The authoritative server sent data to the statistics daemon but received
+no acknowledgement within the specified time. The message includes
+additional information describing the reason for the failure.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_STATS_TIMER_DISABLED">
+<term>AUTH_STATS_TIMER_DISABLED statistics timer has been disabled</term>
+<listitem><para>
+This is a debug message indicating that the statistics timer has been
+disabled in the authoritative server and no statistics information is
+being produced.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_STATS_TIMER_SET">
+<term>AUTH_STATS_TIMER_SET statistics timer set to %1 second(s)</term>
+<listitem><para>
+This is a debug message indicating that the statistics timer has been
+enabled and that the authoritative server will produce statistics data
+at the specified interval.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_UNSUPPORTED_OPCODE">
+<term>AUTH_UNSUPPORTED_OPCODE unsupported opcode: %1</term>
+<listitem><para>
+This is a debug message, produced when a received DNS packet being
+processed by the authoritative server has been found to contain an
+unsupported opcode. (The opcode is included in the message.) The server
+will return an error code of NOTIMPL to the sender.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_XFRIN_CHANNEL_CREATED">
+<term>AUTH_XFRIN_CHANNEL_CREATED XFRIN session channel created</term>
+<listitem><para>
+This is a debug message indicating that the authoritative server has
+created a channel to the XFRIN (Transfer-in) process. It is issued
+during server startup as an indication that the initialization is
+proceeding normally.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_XFRIN_CHANNEL_ESTABLISHED">
+<term>AUTH_XFRIN_CHANNEL_ESTABLISHED XFRIN session channel established</term>
+<listitem><para>
+This is a debug message indicating that the authoritative server has
+established communication over the previously-created channel to the
+XFRIN (Transfer-in) process. It is issued during server startup as an
+indication that the initialization is proceeding normally.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_ZONEMGR_COMMS">
+<term>AUTH_ZONEMGR_COMMS error communicating with zone manager: %1</term>
+<listitem><para>
+This is a debug message output during the processing of a NOTIFY request.
+An error (listed in the message) has been encountered whilst communicating
+with the zone manager. The NOTIFY request will not be honored.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_ZONEMGR_ERROR">
+<term>AUTH_ZONEMGR_ERROR received error response from zone manager: %1</term>
+<listitem><para>
+This is a debug message output during the processing of a NOTIFY
+request. The zone manager component has been informed of the request,
+but has returned an error response (which is included in the message). The
+NOTIFY request will not be honored.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_CHECK_MSGQ_ALREADY_RUNNING">
+<term>BIND10_CHECK_MSGQ_ALREADY_RUNNING checking if msgq is already running</term>
+<listitem><para>
+The boss process is starting up and will now check if the message bus
+daemon is already running. If so, it will not be able to start, as it
+needs a dedicated message bus.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_CONFIGURATION_START_AUTH">
+<term>BIND10_CONFIGURATION_START_AUTH start authoritative server: %1</term>
+<listitem><para>
+This message shows whether or not the authoritative server should be
+started according to the configuration.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_CONFIGURATION_START_RESOLVER">
+<term>BIND10_CONFIGURATION_START_RESOLVER start resolver: %1</term>
+<listitem><para>
+This message shows whether or not the resolver should be
+started according to the configuration.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_INVALID_USER">
+<term>BIND10_INVALID_USER invalid user: %1</term>
+<listitem><para>
+The boss process was started with the -u option, to drop root privileges
+and continue running as the specified user, but the user is unknown.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_KILLING_ALL_PROCESSES">
+<term>BIND10_KILLING_ALL_PROCESSES killing all started processes</term>
+<listitem><para>
+The boss module was not able to start every process it needed to start
+during startup, and will now kill the processes that did get started.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_KILL_PROCESS">
+<term>BIND10_KILL_PROCESS killing process %1</term>
+<listitem><para>
+The boss module is sending a kill signal to process with the given name,
+as part of the process of killing all started processes during a failed
+startup, as described for BIND10_KILLING_ALL_PROCESSES.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_MSGQ_ALREADY_RUNNING">
+<term>BIND10_MSGQ_ALREADY_RUNNING msgq daemon already running, cannot start</term>
+<listitem><para>
+There already appears to be a message bus daemon running. Either an
+old process was not shut down correctly, and needs to be killed, or
+another instance of BIND10, with the same msgq domain socket, is
+running, which needs to be stopped.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_MSGQ_DAEMON_ENDED">
+<term>BIND10_MSGQ_DAEMON_ENDED b10-msgq process died, shutting down</term>
+<listitem><para>
+The message bus daemon has died. This is a fatal error, since it may
+leave the system in an inconsistent state. BIND10 will now shut down.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_MSGQ_DISAPPEARED">
+<term>BIND10_MSGQ_DISAPPEARED msgq channel disappeared</term>
+<listitem><para>
+While listening on the message bus channel for messages, it suddenly
+disappeared. The msgq daemon may have died. This might lead to an
+inconsistent state of the system, and BIND 10 will now shut down.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_PROCESS_ENDED_NO_EXIT_STATUS">
+<term>BIND10_PROCESS_ENDED_NO_EXIT_STATUS process %1 (PID %2) died: exit status not available</term>
+<listitem><para>
+The given process ended unexpectedly, but no exit status is
+available. See BIND10_PROCESS_ENDED_WITH_EXIT_STATUS for a longer
+description.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_PROCESS_ENDED_WITH_EXIT_STATUS">
+<term>BIND10_PROCESS_ENDED_WITH_EXIT_STATUS process %1 (PID %2) terminated, exit status = %3</term>
+<listitem><para>
+The given process ended unexpectedly with the given exit status.
+Depending on which module it was, it may simply be restarted, or it
+may be a problem that will cause the boss module to shut down too.
+The latter happens if it was the message bus daemon, which, if it has
+died suddenly, may leave the system in an inconsistent state. BIND10
+will also shut down now if it has been run with --brittle.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_READING_BOSS_CONFIGURATION">
+<term>BIND10_READING_BOSS_CONFIGURATION reading boss configuration</term>
+<listitem><para>
+The boss process is starting up, and will now process the initial
+configuration, as received from the configuration manager.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_RECEIVED_COMMAND">
+<term>BIND10_RECEIVED_COMMAND received command: %1</term>
+<listitem><para>
+The boss module received a command and shall now process it. The command
+is printed.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_RECEIVED_NEW_CONFIGURATION">
+<term>BIND10_RECEIVED_NEW_CONFIGURATION received new configuration: %1</term>
+<listitem><para>
+The boss module received a configuration update and is going to apply
+it now. The new configuration is printed.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_RECEIVED_SIGNAL">
+<term>BIND10_RECEIVED_SIGNAL received signal %1</term>
+<listitem><para>
+The boss module received the given signal.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_RESURRECTED_PROCESS">
+<term>BIND10_RESURRECTED_PROCESS resurrected %1 (PID %2)</term>
+<listitem><para>
+The given process has been restarted successfully, and is now running
+with the given process id.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_RESURRECTING_PROCESS">
+<term>BIND10_RESURRECTING_PROCESS resurrecting dead %1 process...</term>
+<listitem><para>
+The given process has ended unexpectedly, and is now restarted.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_SELECT_ERROR">
+<term>BIND10_SELECT_ERROR error in select() call: %1</term>
+<listitem><para>
+There was a fatal error in the call to select(), used to see if a child
+process has ended or if there is a message on the message bus. This
+should not happen under normal circumstances and is considered fatal,
+so BIND 10 will now shut down. The specific error is printed.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_SEND_SIGKILL">
+<term>BIND10_SEND_SIGKILL sending SIGKILL to %1 (PID %2)</term>
+<listitem><para>
+The boss module is sending a SIGKILL signal to the given process.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_SEND_SIGTERM">
+<term>BIND10_SEND_SIGTERM sending SIGTERM to %1 (PID %2)</term>
+<listitem><para>
+The boss module is sending a SIGTERM signal to the given process.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_SHUTDOWN">
+<term>BIND10_SHUTDOWN stopping the server</term>
+<listitem><para>
+The boss process received a command or signal telling it to shut down.
+It will send a shutdown command to each process. The processes that do
+not shut down will then receive a SIGTERM signal. If that doesn't work,
+it shall send SIGKILL signals to the processes still alive.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_SHUTDOWN_COMPLETE">
+<term>BIND10_SHUTDOWN_COMPLETE all processes ended, shutdown complete</term>
+<listitem><para>
+All child processes have been stopped, and the boss process will now
+stop itself.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_SOCKCREATOR_BAD_CAUSE">
+<term>BIND10_SOCKCREATOR_BAD_CAUSE unknown error cause from socket creator: %1</term>
+<listitem><para>
+The socket creator reported an error when creating a socket. But the function
+which failed is unknown (not one of 'S' for socket or 'B' for bind).
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_SOCKCREATOR_BAD_RESPONSE">
+<term>BIND10_SOCKCREATOR_BAD_RESPONSE unknown response for socket request: %1</term>
+<listitem><para>
+The boss requested a socket from the creator, but the answer is unknown. This
+looks like a programmer error.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_SOCKCREATOR_CRASHED">
+<term>BIND10_SOCKCREATOR_CRASHED the socket creator crashed</term>
+<listitem><para>
+The socket creator terminated unexpectedly. It is not possible to restart it
+(because the boss already gave up root privileges), so the system is going
+to terminate.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_SOCKCREATOR_EOF">
+<term>BIND10_SOCKCREATOR_EOF eof while expecting data from socket creator</term>
+<listitem><para>
+There should be more data from the socket creator, but it closed the socket.
+It probably crashed.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_SOCKCREATOR_INIT">
+<term>BIND10_SOCKCREATOR_INIT initializing socket creator parser</term>
+<listitem><para>
+The boss module initializes routines for parsing the socket creator
+protocol.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_SOCKCREATOR_KILL">
+<term>BIND10_SOCKCREATOR_KILL killing the socket creator</term>
+<listitem><para>
+The socket creator is being terminated the aggressive way, by sending it
+SIGKILL. This should not happen usually.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_SOCKCREATOR_TERMINATE">
+<term>BIND10_SOCKCREATOR_TERMINATE terminating socket creator</term>
+<listitem><para>
+The boss module sends a request to terminate to the socket creator.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_SOCKCREATOR_TRANSPORT_ERROR">
+<term>BIND10_SOCKCREATOR_TRANSPORT_ERROR transport error when talking to the socket creator: %1</term>
+<listitem><para>
+Either sending or receiving data from the socket creator failed with the given
+error. The creator probably crashed or some serious OS-level problem happened,
+as the communication happens only on local host.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_SOCKET_CREATED">
+<term>BIND10_SOCKET_CREATED successfully created socket %1</term>
+<listitem><para>
+The socket creator successfully created and sent a requested socket, it has
+the given file number.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_SOCKET_ERROR">
+<term>BIND10_SOCKET_ERROR error on %1 call in the creator: %2/%3</term>
+<listitem><para>
+The socket creator failed to create the requested socket. It failed on the
+indicated OS API function with given error.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_SOCKET_GET">
+<term>BIND10_SOCKET_GET requesting socket [%1]:%2 of type %3 from the creator</term>
+<listitem><para>
+The boss forwards a request for a socket to the socket creator.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_STARTED_PROCESS">
+<term>BIND10_STARTED_PROCESS started %1</term>
+<listitem><para>
+The given process has successfully been started.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_STARTED_PROCESS_PID">
+<term>BIND10_STARTED_PROCESS_PID started %1 (PID %2)</term>
+<listitem><para>
+The given process has successfully been started, and has the given PID.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_STARTING">
+<term>BIND10_STARTING starting BIND10: %1</term>
+<listitem><para>
+Informational message on startup that shows the full version.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_STARTING_PROCESS">
+<term>BIND10_STARTING_PROCESS starting process %1</term>
+<listitem><para>
+The boss module is starting the given process.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_STARTING_PROCESS_PORT">
+<term>BIND10_STARTING_PROCESS_PORT starting process %1 (to listen on port %2)</term>
+<listitem><para>
+The boss module is starting the given process, which will listen on the
+given port number.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_STARTING_PROCESS_PORT_ADDRESS">
+<term>BIND10_STARTING_PROCESS_PORT_ADDRESS starting process %1 (to listen on %2#%3)</term>
+<listitem><para>
+The boss module is starting the given process, which will listen on the
+given address and port number (written as &lt;address&gt;#&lt;port&gt;).
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_STARTUP_COMPLETE">
+<term>BIND10_STARTUP_COMPLETE BIND 10 started</term>
+<listitem><para>
+All modules have been successfully started, and BIND 10 is now running.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_STARTUP_ERROR">
+<term>BIND10_STARTUP_ERROR error during startup: %1</term>
+<listitem><para>
+There was a fatal error when BIND10 was trying to start. The error is
+shown, and BIND10 will now shut down.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_START_AS_NON_ROOT">
+<term>BIND10_START_AS_NON_ROOT starting %1 as a user, not root. This might fail.</term>
+<listitem><para>
+The given module is being started or restarted without root privileges.
+If the module needs these privileges, it may have problems starting.
+Note that this issue should be resolved by the pending 'socket-creator'
+process; once that has been implemented, modules should not need root
+privileges anymore. See tickets #800 and #801 for more information.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_STOP_PROCESS">
+<term>BIND10_STOP_PROCESS asking %1 to shut down</term>
+<listitem><para>
+The boss module is sending a shutdown command to the given module over
+the message channel.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_UNKNOWN_CHILD_PROCESS_ENDED">
+<term>BIND10_UNKNOWN_CHILD_PROCESS_ENDED unknown child pid %1 exited</term>
+<listitem><para>
+An unknown child process has exited. The PID is printed, but no further
+action will be taken by the boss process.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_ENTRY_MISSING_RRSET">
+<term>CACHE_ENTRY_MISSING_RRSET missing RRset to generate message for %1</term>
+<listitem><para>
+The cache tried to generate the complete answer message. It knows the structure
+of the message, but some of the RRsets to be put there are not in cache (they
+probably expired already). Therefore it pretends the message was not found.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_LOCALZONE_FOUND">
+<term>CACHE_LOCALZONE_FOUND found entry with key %1 in local zone data</term>
+<listitem><para>
+Debug message, noting that the requested data was successfully found in the
+local zone data of the cache.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_LOCALZONE_UNKNOWN">
+<term>CACHE_LOCALZONE_UNKNOWN entry with key %1 not found in local zone data</term>
+<listitem><para>
+Debug message. The requested data was not found in the local zone data.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_LOCALZONE_UPDATE">
+<term>CACHE_LOCALZONE_UPDATE updating local zone element at key %1</term>
+<listitem><para>
+Debug message issued when there's update to the local zone section of cache.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_MESSAGES_DEINIT">
+<term>CACHE_MESSAGES_DEINIT deinitialized message cache</term>
+<listitem><para>
+Debug message. It is issued when the server deinitializes the message cache.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_MESSAGES_EXPIRED">
+<term>CACHE_MESSAGES_EXPIRED found an expired message entry for %1 in the message cache</term>
+<listitem><para>
+Debug message. The requested data was found in the message cache, but it
+already expired. Therefore the cache removes the entry and pretends it found
+nothing.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_MESSAGES_FOUND">
+<term>CACHE_MESSAGES_FOUND found a message entry for %1 in the message cache</term>
+<listitem><para>
+Debug message. We found the whole message in the cache, so it can be returned
+to user without any other lookups.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_MESSAGES_INIT">
+<term>CACHE_MESSAGES_INIT initialized message cache for %1 messages of class %2</term>
+<listitem><para>
+Debug message issued when a new message cache is issued. It lists the class
+of messages it can hold and the maximum size of the cache.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_MESSAGES_REMOVE">
+<term>CACHE_MESSAGES_REMOVE removing old instance of %1/%2/%3 first</term>
+<listitem><para>
+Debug message. This may follow CACHE_MESSAGES_UPDATE and indicates that, while
+updating, the old instance is being removed prior to inserting a new one.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_MESSAGES_UNCACHEABLE">
+<term>CACHE_MESSAGES_UNCACHEABLE not inserting uncacheable message %1/%2/%3</term>
+<listitem><para>
+Debug message, noting that the given message can not be cached. This is because
+there's no SOA record in the message. See RFC 2308 section 5 for more
+information.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_MESSAGES_UNKNOWN">
+<term>CACHE_MESSAGES_UNKNOWN no entry for %1 found in the message cache</term>
<listitem><para>
-The asynchronous I/O code encountered an error when trying send data to
-the specified address on the given protocol. The the number of the system
-error that cause the problem is given in the message.
+Debug message. The message cache didn't find any entry for the given key.
</para></listitem>
</varlistentry>
-<varlistentry id="ASIODNS_UNKORIGIN">
-<term>ASIODNS_UNKORIGIN unknown origin for ASIO error code %1 (protocol: %2, address %3)</term>
+<varlistentry id="CACHE_MESSAGES_UPDATE">
+<term>CACHE_MESSAGES_UPDATE updating message entry %1/%2/%3</term>
<listitem><para>
-This message should not appear and indicates an internal error if it does.
-Please enter a bug report.
+Debug message issued when the message cache is being updated with a new
+message. Either the old instance is removed or, if none is found, new one
+is created.
</para></listitem>
</varlistentry>
-<varlistentry id="ASIODNS_UNKRESULT">
-<term>ASIODNS_UNKRESULT unknown result (%1) when IOFetch::stop() was executed for I/O to %2(%3)</term>
+<varlistentry id="CACHE_RESOLVER_DEEPEST">
+<term>CACHE_RESOLVER_DEEPEST looking up deepest NS for %1/%2</term>
<listitem><para>
-The termination method of the resolver's upstream fetch class was called with
-an unknown result code (which is given in the message). This message should
-not appear and may indicate an internal error. Please enter a bug report.
+Debug message. The resolver cache is looking up the deepest known nameserver,
+so the resolution doesn't have to start from the root.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_RESOLVER_INIT">
+<term>CACHE_RESOLVER_INIT initializing resolver cache for class %1</term>
+<listitem><para>
+Debug message. The resolver cache is being created for this given class.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_RESOLVER_INIT_INFO">
+<term>CACHE_RESOLVER_INIT_INFO initializing resolver cache for class %1</term>
+<listitem><para>
+Debug message, the resolver cache is being created for this given class. The
+difference from CACHE_RESOLVER_INIT is only in different format of passed
+information, otherwise it does the same.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_RESOLVER_LOCAL_MSG">
+<term>CACHE_RESOLVER_LOCAL_MSG message for %1/%2 found in local zone data</term>
+<listitem><para>
+Debug message. The resolver cache found a complete message for the user query
+in the zone data.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_RESOLVER_LOCAL_RRSET">
+<term>CACHE_RESOLVER_LOCAL_RRSET RRset for %1/%2 found in local zone data</term>
+<listitem><para>
+Debug message. The resolver cache found a requested RRset in the local zone
+data.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_RESOLVER_LOOKUP_MSG">
+<term>CACHE_RESOLVER_LOOKUP_MSG looking up message in resolver cache for %1/%2</term>
+<listitem><para>
+Debug message. The resolver cache is trying to find a message to answer the
+user query.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_RESOLVER_LOOKUP_RRSET">
+<term>CACHE_RESOLVER_LOOKUP_RRSET looking up RRset in resolver cache for %1/%2</term>
+<listitem><para>
+Debug message. The resolver cache is trying to find an RRset (which usually
+originates internally from the resolver).
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_RESOLVER_NO_QUESTION">
+<term>CACHE_RESOLVER_NO_QUESTION answer message for %1/%2 has empty question section</term>
+<listitem><para>
+The cache tried to fill in found data into the response message. But it
+discovered the message contains no question section, which is invalid.
+This is likely a programmer error, please submit a bug report.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_RESOLVER_UNKNOWN_CLASS_MSG">
+<term>CACHE_RESOLVER_UNKNOWN_CLASS_MSG no cache for class %1</term>
+<listitem><para>
+Debug message. While trying to lookup a message in the resolver cache, it was
+discovered there's no cache for this class at all. Therefore no message is
+found.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_RESOLVER_UNKNOWN_CLASS_RRSET">
+<term>CACHE_RESOLVER_UNKNOWN_CLASS_RRSET no cache for class %1</term>
+<listitem><para>
+Debug message. While trying to lookup an RRset in the resolver cache, it was
+discovered there's no cache for this class at all. Therefore no data is found.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_RESOLVER_UPDATE_MSG">
+<term>CACHE_RESOLVER_UPDATE_MSG updating message for %1/%2/%3</term>
+<listitem><para>
+Debug message. The resolver is updating a message in the cache.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_RESOLVER_UPDATE_RRSET">
+<term>CACHE_RESOLVER_UPDATE_RRSET updating RRset for %1/%2/%3</term>
+<listitem><para>
+Debug message. The resolver is updating an RRset in the cache.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_RESOLVER_UPDATE_UNKNOWN_CLASS_MSG">
+<term>CACHE_RESOLVER_UPDATE_UNKNOWN_CLASS_MSG no cache for class %1</term>
+<listitem><para>
+Debug message. While trying to insert a message into the cache, it was
+discovered that there's no cache for the class of message. Therefore
+the message will not be cached.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_RESOLVER_UPDATE_UNKNOWN_CLASS_RRSET">
+<term>CACHE_RESOLVER_UPDATE_UNKNOWN_CLASS_RRSET no cache for class %1</term>
+<listitem><para>
+Debug message. While trying to insert an RRset into the cache, it was
+discovered that there's no cache for the class of the RRset. Therefore
+the message will not be cached.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_RRSET_EXPIRED">
+<term>CACHE_RRSET_EXPIRED found expired RRset %1/%2/%3</term>
+<listitem><para>
+Debug message. The requested data was found in the RRset cache. However, it is
+expired, so the cache removed it and is going to pretend nothing was found.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_RRSET_INIT">
+<term>CACHE_RRSET_INIT initializing RRset cache for %1 RRsets of class %2</term>
+<listitem><para>
+Debug message. The RRset cache to hold at most this many RRsets for the given
+class is being created.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_RRSET_LOOKUP">
+<term>CACHE_RRSET_LOOKUP looking up %1/%2/%3 in RRset cache</term>
+<listitem><para>
+Debug message. The resolver is trying to look up data in the RRset cache.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_RRSET_NOT_FOUND">
+<term>CACHE_RRSET_NOT_FOUND no RRset found for %1/%2/%3</term>
+<listitem><para>
+Debug message which can follow CACHE_RRSET_LOOKUP. This means the data is not
+in the cache.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_RRSET_REMOVE_OLD">
+<term>CACHE_RRSET_REMOVE_OLD removing old RRset for %1/%2/%3 to make space for new one</term>
+<listitem><para>
+Debug message which can follow CACHE_RRSET_UPDATE. During the update, the cache
+removed an old instance of the RRset to replace it with the new one.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_RRSET_UNTRUSTED">
+<term>CACHE_RRSET_UNTRUSTED not replacing old RRset for %1/%2/%3, it has higher trust level</term>
+<listitem><para>
+Debug message which can follow CACHE_RRSET_UPDATE. The cache already holds the
+same RRset, but from more trusted source, so the old one is kept and new one
+ignored.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_RRSET_UPDATE">
+<term>CACHE_RRSET_UPDATE updating RRset %1/%2/%3 in the cache</term>
+<listitem><para>
+Debug message. The RRset is updating its data with this given RRset.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CC_ASYNC_READ_FAILED">
+<term>CC_ASYNC_READ_FAILED asynchronous read failed</term>
+<listitem><para>
+This marks a low level error, we tried to read data from the message queue
+daemon asynchronously, but the ASIO library returned an error.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CC_CONN_ERROR">
+<term>CC_CONN_ERROR error connecting to message queue (%1)</term>
+<listitem><para>
+It is impossible to reach the message queue daemon for the reason given. It
+is unlikely there'll be reason for whatever program this currently is to
+continue running, as the communication with the rest of BIND 10 is vital
+for the components.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CC_DISCONNECT">
+<term>CC_DISCONNECT disconnecting from message queue daemon</term>
+<listitem><para>
+The library is disconnecting from the message queue daemon. This debug message
+indicates that the program is trying to shut down gracefully.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CC_ESTABLISH">
+<term>CC_ESTABLISH trying to establish connection with message queue daemon at %1</term>
+<listitem><para>
+This debug message indicates that the command channel library is about to
+connect to the message queue daemon, which should be listening on the UNIX-domain
+socket listed in the output.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CC_ESTABLISHED">
+<term>CC_ESTABLISHED successfully connected to message queue daemon</term>
+<listitem><para>
+This debug message indicates that the connection was successfully made, this
+should follow CC_ESTABLISH.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CC_GROUP_RECEIVE">
+<term>CC_GROUP_RECEIVE trying to receive a message</term>
+<listitem><para>
+Debug message, noting that a message is expected to come over the command
+channel.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CC_GROUP_RECEIVED">
+<term>CC_GROUP_RECEIVED message arrived ('%1', '%2')</term>
+<listitem><para>
+Debug message, noting that we successfully received a message (its envelope and
+payload listed). This follows CC_GROUP_RECEIVE, but might happen some time
+later, depending if we waited for it or just polled.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CC_GROUP_SEND">
+<term>CC_GROUP_SEND sending message '%1' to group '%2'</term>
+<listitem><para>
+Debug message, we're about to send a message over the command channel.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CC_INVALID_LENGTHS">
+<term>CC_INVALID_LENGTHS invalid length parameters (%1, %2)</term>
+<listitem><para>
+This happens when garbage comes over the command channel or some kind of
+confusion happens in the program. The data received from the socket makes no
+sense if we interpret it as the lengths of a message. The first one is the
+total length of the message; the second is the length of the header. The header
+and its length (2 bytes) is counted in the total length.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CC_LENGTH_NOT_READY">
+<term>CC_LENGTH_NOT_READY length not ready</term>
+<listitem><para>
+There should be data representing the length of message on the socket, but it
+is not there.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CC_NO_MESSAGE">
+<term>CC_NO_MESSAGE no message ready to be received yet</term>
+<listitem><para>
+The program polled for incoming messages, but there was no message waiting.
+This is a debug message which may happen only after CC_GROUP_RECEIVE.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CC_NO_MSGQ">
+<term>CC_NO_MSGQ unable to connect to message queue (%1)</term>
+<listitem><para>
+It isn't possible to connect to the message queue daemon, for reason listed.
+It is unlikely any program will be able to continue without the communication.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CC_READ_ERROR">
+<term>CC_READ_ERROR error reading data from command channel (%1)</term>
+<listitem><para>
+A low level error happened when the library tried to read data from the
+command channel socket. The reason is listed.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CC_READ_EXCEPTION">
+<term>CC_READ_EXCEPTION error reading data from command channel (%1)</term>
+<listitem><para>
+We received an exception while trying to read data from the command
+channel socket. The reason is listed.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CC_REPLY">
+<term>CC_REPLY replying to message from '%1' with '%2'</term>
+<listitem><para>
+Debug message, noting we're sending a response to the original message
+with the given envelope.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CC_SET_TIMEOUT">
+<term>CC_SET_TIMEOUT setting timeout to %1ms</term>
+<listitem><para>
+Debug message. A timeout for which the program is willing to wait for a reply
+is being set.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CC_START_READ">
+<term>CC_START_READ starting asynchronous read</term>
+<listitem><para>
+Debug message. From now on, when a message (or command) comes, it'll wake the
+program and the library will automatically pass it over to correct place.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CC_SUBSCRIBE">
+<term>CC_SUBSCRIBE subscribing to communication group %1</term>
+<listitem><para>
+Debug message. The program wants to receive messages addressed to this group.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CC_TIMEOUT">
+<term>CC_TIMEOUT timeout reading data from command channel</term>
+<listitem><para>
+The program waited too long for data from the command channel (usually when it
+sent a query to different program and it didn't answer for whatever reason).
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CC_UNSUBSCRIBE">
+<term>CC_UNSUBSCRIBE unsubscribing from communication group %1</term>
+<listitem><para>
+Debug message. The program no longer wants to receive messages addressed to
+this group.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CC_WRITE_ERROR">
+<term>CC_WRITE_ERROR error writing data to command channel (%1)</term>
+<listitem><para>
+A low level error happened when the library tried to write data to the command
+channel socket.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CC_ZERO_LENGTH">
+<term>CC_ZERO_LENGTH invalid message length (0)</term>
+<listitem><para>
+The library received a message length being zero, which makes no sense, since
+all messages must contain at least the envelope.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CFGMGR_AUTOMATIC_CONFIG_DATABASE_UPDATE">
+<term>CFGMGR_AUTOMATIC_CONFIG_DATABASE_UPDATE Updating configuration database from version %1 to %2</term>
+<listitem><para>
+An older version of the configuration database has been found, from which
+there was an automatic upgrade path to the current version. These changes
+are now applied, and no action from the administrator is necessary.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CFGMGR_BAD_UPDATE_RESPONSE_FROM_MODULE">
+<term>CFGMGR_BAD_UPDATE_RESPONSE_FROM_MODULE Unable to parse response from module %1: %2</term>
+<listitem><para>
+The configuration manager sent a configuration update to a module, but
+the module responded with an answer that could not be parsed. The answer
+message appears to be invalid JSON data, or not decodable to a string.
+This is likely to be a problem in the module in question. The update is
+assumed to have failed, and will not be stored.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CFGMGR_CC_SESSION_ERROR">
+<term>CFGMGR_CC_SESSION_ERROR Error connecting to command channel: %1</term>
+<listitem><para>
+The configuration manager daemon was unable to connect to the messaging
+system. The most likely cause is that msgq is not running.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CFGMGR_DATA_READ_ERROR">
+<term>CFGMGR_DATA_READ_ERROR error reading configuration database from disk: %1</term>
+<listitem><para>
+There was a problem reading the persistent configuration data as stored
+on disk. The file may be corrupted, or it is of a version from where
+there is no automatic upgrade path. The file needs to be repaired or
+removed. The configuration manager daemon will now shut down.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CFGMGR_IOERROR_WHILE_WRITING_CONFIGURATION">
+<term>CFGMGR_IOERROR_WHILE_WRITING_CONFIGURATION Unable to write configuration file; configuration not stored: %1</term>
+<listitem><para>
+There was an IO error from the system while the configuration manager
+was trying to write the configuration database to disk. The specific
+error is given. The most likely cause is that the directory where
+the file is stored does not exist, or is not writable. The updated
+configuration is not stored.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CFGMGR_OSERROR_WHILE_WRITING_CONFIGURATION">
+<term>CFGMGR_OSERROR_WHILE_WRITING_CONFIGURATION Unable to write configuration file; configuration not stored: %1</term>
+<listitem><para>
+There was an OS error from the system while the configuration manager
+was trying to write the configuration database to disk. The specific
+error is given. The most likely cause is that the system does not have
+write access to the configuration database file. The updated
+configuration is not stored.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CFGMGR_STOPPED_BY_KEYBOARD">
+<term>CFGMGR_STOPPED_BY_KEYBOARD keyboard interrupt, shutting down</term>
+<listitem><para>
+There was a keyboard interrupt signal to stop the cfgmgr daemon. The
+daemon will now shut down.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CMDCTL_BAD_CONFIG_DATA">
+<term>CMDCTL_BAD_CONFIG_DATA error in config data: %1</term>
+<listitem><para>
+There was an error reading the updated configuration data. The specific
+error is printed.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CMDCTL_BAD_PASSWORD">
+<term>CMDCTL_BAD_PASSWORD bad password for user: %1</term>
+<listitem><para>
+A login attempt was made to b10-cmdctl, but the password was wrong.
+Users can be managed with the tool b10-cmdctl-usermgr.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CMDCTL_CC_SESSION_ERROR">
+<term>CMDCTL_CC_SESSION_ERROR error reading from cc channel: %1</term>
+<listitem><para>
+There was a problem reading from the command and control channel. The
+most likely cause is that the message bus daemon is not running.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CMDCTL_CC_SESSION_TIMEOUT">
+<term>CMDCTL_CC_SESSION_TIMEOUT timeout on cc channel</term>
+<listitem><para>
+A timeout occurred when waiting for essential data from the cc session.
+This usually occurs when b10-cfgmgr is not running or not responding.
+Since we are waiting for essential information, this is a fatal error,
+and the cmdctl daemon will now shut down.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CMDCTL_COMMAND_ERROR">
+<term>CMDCTL_COMMAND_ERROR error in command %1 to module %2: %3</term>
+<listitem><para>
+An error was encountered sending the given command to the given module.
+Either there was a communication problem with the module, or the module
+was not able to process the command, and sent back an error. The
+specific error is printed in the message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CMDCTL_COMMAND_SENT">
+<term>CMDCTL_COMMAND_SENT command '%1' to module '%2' was sent</term>
+<listitem><para>
+This debug message indicates that the given command has been sent to
+the given module.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CMDCTL_NO_SUCH_USER">
+<term>CMDCTL_NO_SUCH_USER username not found in user database: %1</term>
+<listitem><para>
+A login attempt was made to b10-cmdctl, but the username was not known.
+Users can be added with the tool b10-cmdctl-usermgr.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CMDCTL_NO_USER_ENTRIES_READ">
+<term>CMDCTL_NO_USER_ENTRIES_READ failed to read user information, all users will be denied</term>
+<listitem><para>
+The b10-cmdctl daemon was unable to find any user data in the user
+database file. Either it was unable to read the file (in which case
+this message follows a message CMDCTL_USER_DATABASE_READ_ERROR
+containing a specific error), or the file was empty. Users can be added
+with the tool b10-cmdctl-usermgr.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CMDCTL_SEND_COMMAND">
+<term>CMDCTL_SEND_COMMAND sending command %1 to module %2</term>
+<listitem><para>
+This debug message indicates that the given command is being sent to
+the given module.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CMDCTL_SSL_SETUP_FAILURE_USER_DENIED">
+<term>CMDCTL_SSL_SETUP_FAILURE_USER_DENIED failed to create an SSL connection (user denied): %1</term>
+<listitem><para>
+The user was denied because the SSL connection could not successfully
+be set up. The specific error is given in the log message. Possible
+causes may be that the SSL request itself was bad, or the local key or
+certificate file could not be read.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CMDCTL_STOPPED_BY_KEYBOARD">
+<term>CMDCTL_STOPPED_BY_KEYBOARD keyboard interrupt, shutting down</term>
+<listitem><para>
+There was a keyboard interrupt signal to stop the cmdctl daemon. The
+daemon will now shut down.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CMDCTL_UNCAUGHT_EXCEPTION">
+<term>CMDCTL_UNCAUGHT_EXCEPTION uncaught exception: %1</term>
+<listitem><para>
+The b10-cmdctl daemon encountered an uncaught exception and
+will now shut down. This is indicative of a programming error and
+should not happen under normal circumstances. The exception message
+is printed.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CMDCTL_USER_DATABASE_READ_ERROR">
+<term>CMDCTL_USER_DATABASE_READ_ERROR failed to read user database file %1: %2</term>
+<listitem><para>
+The b10-cmdctl daemon was unable to read the user database file. The
+file may be unreadable for the daemon, or it may be corrupted. In the
+latter case, it can be recreated with b10-cmdctl-usermgr. The specific
+error is printed in the log message.
</para></listitem>
</varlistentry>
@@ -148,65 +1561,128 @@ The message itself is ignored by this module.
<varlistentry id="CONFIG_CCSESSION_MSG_INTERNAL">
<term>CONFIG_CCSESSION_MSG_INTERNAL error handling CC session message: %1</term>
<listitem><para>
-There was an internal problem handling an incoming message on the
-command and control channel. An unexpected exception was thrown. This
-most likely points to an internal inconsistency in the module code. The
-exception message is appended to the log error, and the module will
-continue to run, but will not send back an answer.
+There was an internal problem handling an incoming message on the command
+and control channel. An unexpected exception was thrown, details of
+which are appended to the message. The module will continue to run,
+but will not send back an answer.
+</para><para>
+The most likely cause of this error is a programming error. Please raise
+a bug report.
</para></listitem>
</varlistentry>
-<varlistentry id="CONFIG_FOPEN_ERR">
-<term>CONFIG_FOPEN_ERR error opening %1: %2</term>
+<varlistentry id="CONFIG_GET_FAIL">
+<term>CONFIG_GET_FAIL error getting configuration from cfgmgr: %1</term>
<listitem><para>
-There was an error opening the given file.
+The configuration manager returned an error when this module requested
+the configuration. The full error message answer from the configuration
+manager is appended to the log error. The most likely cause is that
+the module is of a different (command specification) version than the
+running configuration manager.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CONFIG_GET_FAILED">
+<term>CONFIG_GET_FAILED error getting configuration from cfgmgr: %1</term>
+<listitem><para>
+The configuration manager returned an error response when the module
+requested its configuration. The full error message answer from the
+configuration manager is appended to the log error.
</para></listitem>
</varlistentry>
<varlistentry id="CONFIG_JSON_PARSE">
<term>CONFIG_JSON_PARSE JSON parse error in %1: %2</term>
<listitem><para>
-There was a parse error in the JSON file. The given file does not appear
+There was an error parsing the JSON file. The given file does not appear
to be in valid JSON format. Please verify that the filename is correct
and that the contents are valid JSON.
</para></listitem>
</varlistentry>
-<varlistentry id="CONFIG_MANAGER_CONFIG">
-<term>CONFIG_MANAGER_CONFIG error getting configuration from cfgmgr: %1</term>
+<varlistentry id="CONFIG_LOG_CONFIG_ERRORS">
+<term>CONFIG_LOG_CONFIG_ERRORS error(s) in logging configuration: %1</term>
<listitem><para>
-The configuration manager returned an error when this module requested
-the configuration. The full error message answer from the configuration
-manager is appended to the log error. The most likely cause is that
-the module is of a different (command specification) version than the
-running configuration manager.
+There was a logging configuration update, but the internal validator
+for logging configuration found that it contained errors. The errors
+are shown, and the update is ignored.
</para></listitem>
</varlistentry>
-<varlistentry id="CONFIG_MANAGER_MOD_SPEC">
-<term>CONFIG_MANAGER_MOD_SPEC module specification not accepted by cfgmgr: %1</term>
+<varlistentry id="CONFIG_LOG_EXPLICIT">
+<term>CONFIG_LOG_EXPLICIT will use logging configuration for explicitly-named logger %1</term>
<listitem><para>
-The module specification file for this module was rejected by the
-configuration manager. The full error message answer from the
-configuration manager is appended to the log error. The most likely
-cause is that the module is of a different (specification file) version
-than the running configuration manager.
+This is a debug message. When processing the "loggers" part of the
+configuration file, the configuration library found an entry for the named
+logger that matches the logger specification for the program. The logging
+configuration for the program will be updated with the information.
</para></listitem>
</varlistentry>
-<varlistentry id="CONFIG_MODULE_SPEC">
-<term>CONFIG_MODULE_SPEC module specification error in %1: %2</term>
+<varlistentry id="CONFIG_LOG_IGNORE_EXPLICIT">
+<term>CONFIG_LOG_IGNORE_EXPLICIT ignoring logging configuration for explicitly-named logger %1</term>
<listitem><para>
-The given file does not appear to be a valid specification file. Please
-verify that the filename is correct and that its contents are a valid
-BIND10 module specification.
+This is a debug message. When processing the "loggers" part of the
+configuration file, the configuration library found an entry for the
+named logger. As this does not match the logger specification for the
+program, it has been ignored.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CONFIG_LOG_IGNORE_WILD">
+<term>CONFIG_LOG_IGNORE_WILD ignoring logging configuration for wildcard logger %1</term>
+<listitem><para>
+This is a debug message. When processing the "loggers" part of the
+configuration file, the configuration library found the named wildcard
+entry (one containing the "*" character) that matched a logger already
+matched by an explicitly named entry. The configuration is ignored.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CONFIG_LOG_WILD_MATCH">
+<term>CONFIG_LOG_WILD_MATCH will use logging configuration for wildcard logger %1</term>
+<listitem><para>
+This is a debug message. When processing the "loggers" part of
+the configuration file, the configuration library found the named
+wildcard entry (one containing the "*" character) that matches a logger
+specification in the program. The logging configuration for the program
+will be updated with the information.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CONFIG_MOD_SPEC_FORMAT">
+<term>CONFIG_MOD_SPEC_FORMAT module specification error in %1: %2</term>
+<listitem><para>
+The given file does not appear to be a valid specification file: details
+are included in the message. Please verify that the filename is correct
+and that its contents are a valid BIND10 module specification.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CONFIG_MOD_SPEC_REJECT">
+<term>CONFIG_MOD_SPEC_REJECT module specification rejected by cfgmgr: %1</term>
+<listitem><para>
+The specification file for this module was rejected by the configuration
+manager. The full error message answer from the configuration manager is
+appended to the log error. The most likely cause is that the module is of
+a different (specification file) version than the running configuration
+manager.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CONFIG_OPEN_FAIL">
+<term>CONFIG_OPEN_FAIL error opening %1: %2</term>
+<listitem><para>
+There was an error opening the given file. The reason for the failure
+is included in the message.
</para></listitem>
</varlistentry>
<varlistentry id="DATASRC_CACHE_CREATE">
<term>DATASRC_CACHE_CREATE creating the hotspot cache</term>
<listitem><para>
-Debug information that the hotspot cache was created at startup.
+This is a debug message issued during startup when the hotspot cache
+is created.
</para></listitem>
</varlistentry>
@@ -218,39 +1694,37 @@ Debug information. The hotspot cache is being destroyed.
</varlistentry>
<varlistentry id="DATASRC_CACHE_DISABLE">
-<term>DATASRC_CACHE_DISABLE disabling the cache</term>
+<term>DATASRC_CACHE_DISABLE disabling the hotspot cache</term>
<listitem><para>
-The hotspot cache is disabled from now on. It is not going to store
-information or return anything.
+A debug message issued when the hotspot cache is disabled.
</para></listitem>
</varlistentry>
<varlistentry id="DATASRC_CACHE_ENABLE">
-<term>DATASRC_CACHE_ENABLE enabling the cache</term>
+<term>DATASRC_CACHE_ENABLE enabling the hotspot cache</term>
<listitem><para>
-The hotspot cache is enabled from now on.
+A debug message issued when the hotspot cache is enabled.
</para></listitem>
</varlistentry>
<varlistentry id="DATASRC_CACHE_EXPIRED">
-<term>DATASRC_CACHE_EXPIRED the item '%1' is expired</term>
+<term>DATASRC_CACHE_EXPIRED item '%1' in the hotspot cache has expired</term>
<listitem><para>
-Debug information. There was an attempt to look up an item in the hotspot
-cache. And the item was actually there, but it was too old, so it was removed
-instead and nothing is reported (the external behaviour is the same as with
-CACHE_NOT_FOUND).
+A debug message issued when a hotspot cache lookup located the item but it
+had expired. The item was removed and the program proceeded as if the item
+had not been found.
</para></listitem>
</varlistentry>
<varlistentry id="DATASRC_CACHE_FOUND">
<term>DATASRC_CACHE_FOUND the item '%1' was found</term>
<listitem><para>
-Debug information. An item was successfully looked up in the hotspot cache.
+Debug information. An item was successfully located in the hotspot cache.
</para></listitem>
</varlistentry>
<varlistentry id="DATASRC_CACHE_FULL">
-<term>DATASRC_CACHE_FULL cache is full, dropping oldest</term>
+<term>DATASRC_CACHE_FULL hotspot cache is full, dropping oldest</term>
<listitem><para>
Debug information. After inserting an item into the hotspot cache, the
maximum number of items was exceeded, so the least recently used item will
@@ -259,39 +1733,39 @@ be dropped. This should be directly followed by CACHE_REMOVE.
</varlistentry>
<varlistentry id="DATASRC_CACHE_INSERT">
-<term>DATASRC_CACHE_INSERT inserting item '%1' into the cache</term>
+<term>DATASRC_CACHE_INSERT inserting item '%1' into the hotspot cache</term>
<listitem><para>
-Debug information. It means a new item is being inserted into the hotspot
+A debug message indicating that a new item is being inserted into the hotspot
cache.
</para></listitem>
</varlistentry>
<varlistentry id="DATASRC_CACHE_NOT_FOUND">
-<term>DATASRC_CACHE_NOT_FOUND the item '%1' was not found</term>
+<term>DATASRC_CACHE_NOT_FOUND the item '%1' was not found in the hotspot cache</term>
<listitem><para>
-Debug information. It was attempted to look up an item in the hotspot cache,
-but it is not there.
+A debug message issued when the hotspot cache was searched for the specified
+item but it was not found.
</para></listitem>
</varlistentry>
<varlistentry id="DATASRC_CACHE_OLD_FOUND">
-<term>DATASRC_CACHE_OLD_FOUND older instance of cache item found, replacing</term>
+<term>DATASRC_CACHE_OLD_FOUND older instance of hotspot cache item '%1' found, replacing</term>
<listitem><para>
Debug information. While inserting an item into the hotspot cache, an older
-instance of an item with the same name was found. The old instance will be
-removed. This should be directly followed by CACHE_REMOVE.
+instance of an item with the same name was found; the old instance will be
+removed. This will be directly followed by CACHE_REMOVE.
</para></listitem>
</varlistentry>
<varlistentry id="DATASRC_CACHE_REMOVE">
-<term>DATASRC_CACHE_REMOVE removing '%1' from the cache</term>
+<term>DATASRC_CACHE_REMOVE removing '%1' from the hotspot cache</term>
<listitem><para>
Debug information. An item is being removed from the hotspot cache.
</para></listitem>
</varlistentry>
<varlistentry id="DATASRC_CACHE_SLOTS">
-<term>DATASRC_CACHE_SLOTS setting the cache size to '%1', dropping '%2' items</term>
+<term>DATASRC_CACHE_SLOTS setting the hotspot cache size to '%1', dropping '%2' items</term>
<listitem><para>
The maximum allowed number of items of the hotspot cache is set to the given
number. If there are too many, some of them will be dropped. The size of 0
@@ -299,11 +1773,109 @@ means no limit.
</para></listitem>
</varlistentry>
+<varlistentry id="DATASRC_DATABASE_FIND_ERROR">
+<term>DATASRC_DATABASE_FIND_ERROR error retrieving data from datasource %1: %2</term>
+<listitem><para>
+There was an internal error while reading data from a datasource. This can either
+mean the specific data source implementation is not behaving correctly, or the
+data it provides is invalid. The current search is aborted.
+The error message contains specific information about the error.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_DATABASE_FIND_RECORDS">
+<term>DATASRC_DATABASE_FIND_RECORDS looking in datasource %1 for record %2/%3</term>
+<listitem><para>
+Debug information. The database data source is looking up records with the given
+name and type in the database.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_DATABASE_FIND_TTL_MISMATCH">
+<term>DATASRC_DATABASE_FIND_TTL_MISMATCH TTL values differ in %1 for elements of %2/%3/%4, setting to %5</term>
+<listitem><para>
+The datasource backend provided resource records for the given RRset with
+different TTL values. The TTL of the RRSET is set to the lowest value, which
+is printed in the log message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_DATABASE_FIND_UNCAUGHT_ERROR">
+<term>DATASRC_DATABASE_FIND_UNCAUGHT_ERROR uncaught general error retrieving data from datasource %1: %2</term>
+<listitem><para>
+There was an uncaught general exception while reading data from a datasource.
+This most likely points to a logic error in the code, and can be considered a
+bug. The current search is aborted. Specific information about the exception is
+printed in this error message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_DATABASE_FIND_UNCAUGHT_ISC_ERROR">
+<term>DATASRC_DATABASE_FIND_UNCAUGHT_ISC_ERROR uncaught error retrieving data from datasource %1: %2</term>
+<listitem><para>
+There was an uncaught ISC exception while reading data from a datasource. This
+most likely points to a logic error in the code, and can be considered a bug.
+The current search is aborted. Specific information about the exception is
+printed in this error message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_DATABASE_FOUND_DELEGATION">
+<term>DATASRC_DATABASE_FOUND_DELEGATION Found delegation at %2 in %1</term>
+<listitem><para>
+When searching for a domain, the program encountered a delegation to a different zone
+at the given domain name. It will return that one instead.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_DATABASE_FOUND_DELEGATION_EXACT">
+<term>DATASRC_DATABASE_FOUND_DELEGATION_EXACT Found delegation at %2 (exact match) in %1</term>
+<listitem><para>
+The program found the domain requested, but it is a delegation point to a
+different zone, therefore it is not authoritative for this domain name.
+It will return the NS record instead.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_DATABASE_FOUND_DNAME">
+<term>DATASRC_DATABASE_FOUND_DNAME Found DNAME at %2 in %1</term>
+<listitem><para>
+When searching for a domain, the program encountered a DNAME redirection to a different
+place in the domain space at the given domain name. It will return that one
+instead.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_DATABASE_FOUND_NXDOMAIN">
+<term>DATASRC_DATABASE_FOUND_NXDOMAIN search in datasource %1 resulted in NXDOMAIN for %2/%3/%4</term>
+<listitem><para>
+The data returned by the database backend did not contain any data for the given
+domain name, class and type.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_DATABASE_FOUND_NXRRSET">
+<term>DATASRC_DATABASE_FOUND_NXRRSET search in datasource %1 resulted in NXRRSET for %2/%3/%4</term>
+<listitem><para>
+The data returned by the database backend contained data for the given domain
+name and class, but not for the given type.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_DATABASE_FOUND_RRSET">
+<term>DATASRC_DATABASE_FOUND_RRSET search in datasource %1 resulted in RRset %2</term>
+<listitem><para>
+The data returned by the database backend contained data for the given domain
+name, and it either matches the type or has a relevant type. The RRset that is
+returned is printed.
+</para></listitem>
+</varlistentry>
+
<varlistentry id="DATASRC_DO_QUERY">
<term>DATASRC_DO_QUERY handling query for '%1/%2'</term>
<listitem><para>
-Debug information. We're processing some internal query for given name and
-type.
+A debug message indicating that a query for the given name and RR type is being
+processed.
</para></listitem>
</varlistentry>
@@ -317,8 +1889,9 @@ Debug information. An RRset is being added to the in-memory data source.
<varlistentry id="DATASRC_MEM_ADD_WILDCARD">
<term>DATASRC_MEM_ADD_WILDCARD adding wildcards for '%1'</term>
<listitem><para>
-Debug information. Some special marks above each * in wildcard name are needed.
-They are being added now for this name.
+This is a debug message issued during the processing of a wildcard
+name. The internal domain name tree is scanned and some nodes are
+specially marked to allow the wildcard lookup to succeed.
</para></listitem>
</varlistentry>
@@ -349,7 +1922,7 @@ returning the CNAME instead.
<term>DATASRC_MEM_CNAME_COEXIST can't add data to CNAME in domain '%1'</term>
<listitem><para>
This is the same problem as in MEM_CNAME_TO_NONEMPTY, but it happened the
-other way around -- adding some outher data to CNAME.
+other way around -- adding some other data to CNAME.
</para></listitem>
</varlistentry>
@@ -401,11 +1974,11 @@ Debug information. A DNAME was found instead of the requested information.
</varlistentry>
<varlistentry id="DATASRC_MEM_DNAME_NS">
-<term>DATASRC_MEM_DNAME_NS dNAME and NS can't coexist in non-apex domain '%1'</term>
+<term>DATASRC_MEM_DNAME_NS DNAME and NS can't coexist in non-apex domain '%1'</term>
<listitem><para>
-It was requested for DNAME and NS records to be put into the same domain
-which is not the apex (the top of the zone). This is forbidden by RFC
-2672, section 3. This indicates a problem with provided data.
+A request was made for DNAME and NS records to be put into the same
+domain which is not the apex (the top of the zone). This is forbidden
+by RFC 2672 (section 3) and indicates a problem with provided data.
</para></listitem>
</varlistentry>
@@ -457,8 +2030,8 @@ Debug information. The content of master file is being loaded into the memory.
</para></listitem>
</varlistentry>
-<varlistentry id="DATASRC_MEM_NOTFOUND">
-<term>DATASRC_MEM_NOTFOUND requested domain '%1' not found</term>
+<varlistentry id="DATASRC_MEM_NOT_FOUND">
+<term>DATASRC_MEM_NOT_FOUND requested domain '%1' not found</term>
<listitem><para>
Debug information. The requested domain does not exist.
</para></listitem>
@@ -544,7 +2117,7 @@ behaviour is specified by RFC 1034, section 4.3.3
</varlistentry>
<varlistentry id="DATASRC_MEM_WILDCARD_DNAME">
-<term>DATASRC_MEM_WILDCARD_DNAME dNAME record in wildcard domain '%1'</term>
+<term>DATASRC_MEM_WILDCARD_DNAME DNAME record in wildcard domain '%1'</term>
<listitem><para>
The software refuses to load DNAME records into a wildcard domain. It isn't
explicitly forbidden, but the protocol is ambiguous about how this should
@@ -554,7 +2127,7 @@ different tools.
</varlistentry>
<varlistentry id="DATASRC_MEM_WILDCARD_NS">
-<term>DATASRC_MEM_WILDCARD_NS nS record in wildcard domain '%1'</term>
+<term>DATASRC_MEM_WILDCARD_NS NS record in wildcard domain '%1'</term>
<listitem><para>
The software refuses to load NS records into a wildcard domain. It isn't
explicitly forbidden, but the protocol is ambiguous about how this should
@@ -566,15 +2139,15 @@ different tools.
<varlistentry id="DATASRC_META_ADD">
<term>DATASRC_META_ADD adding a data source into meta data source</term>
<listitem><para>
-Debug information. Yet another data source is being added into the meta data
-source. (probably at startup or reconfiguration)
+This is a debug message issued during startup or reconfiguration.
+Another data source is being added into the meta data source.
</para></listitem>
</varlistentry>
<varlistentry id="DATASRC_META_ADD_CLASS_MISMATCH">
<term>DATASRC_META_ADD_CLASS_MISMATCH mismatch between classes '%1' and '%2'</term>
<listitem><para>
-It was attempted to add a data source into a meta data source. But their
+An attempt was made to add a data source into a meta data source, but their
classes do not match.
</para></listitem>
</varlistentry>
@@ -634,7 +2207,7 @@ information for it.
</varlistentry>
<varlistentry id="DATASRC_QUERY_CACHED">
-<term>DATASRC_QUERY_CACHED data for %1/%2 found in cache</term>
+<term>DATASRC_QUERY_CACHED data for %1/%2 found in hotspot cache</term>
<listitem><para>
Debug information. The requested data were found in the hotspot cache, so
no query is sent to the real data source.
@@ -642,7 +2215,7 @@ no query is sent to the real data source.
</varlistentry>
<varlistentry id="DATASRC_QUERY_CHECK_CACHE">
-<term>DATASRC_QUERY_CHECK_CACHE checking cache for '%1/%2'</term>
+<term>DATASRC_QUERY_CHECK_CACHE checking hotspot cache for '%1/%2'</term>
<listitem><para>
Debug information. While processing a query, lookup to the hotspot cache
is being made.
@@ -666,12 +2239,11 @@ way down to the given domain.
</varlistentry>
<varlistentry id="DATASRC_QUERY_EMPTY_CNAME">
-<term>DATASRC_QUERY_EMPTY_CNAME cNAME at '%1' is empty</term>
+<term>DATASRC_QUERY_EMPTY_CNAME CNAME at '%1' is empty</term>
<listitem><para>
-There was an CNAME and it was being followed. But it contains no records,
-so there's nowhere to go. There will be no answer. This indicates a problem
-with supplied data.
-We tried to follow
+A CNAME chain was being followed and an entry was found that pointed
+to a domain name that had no RRsets associated with it. As a result,
+the query cannot be answered. This indicates a problem with supplied data.
</para></listitem>
</varlistentry>
@@ -687,15 +2259,15 @@ DNAME is empty (it has no records). This indicates problem with supplied data.
<term>DATASRC_QUERY_FAIL query failed</term>
<listitem><para>
Some subtask of query processing failed. The reason should have been reported
-already. We are returning SERVFAIL.
+already and a SERVFAIL will be returned to the querying system.
</para></listitem>
</varlistentry>
<varlistentry id="DATASRC_QUERY_FOLLOW_CNAME">
<term>DATASRC_QUERY_FOLLOW_CNAME following CNAME at '%1'</term>
<listitem><para>
-Debug information. The domain is a CNAME (or a DNAME and we created a CNAME
-for it already), so it's being followed.
+Debug information. The domain is a CNAME (or a DNAME and a CNAME for it
+has already been created) and the search is following this chain.
</para></listitem>
</varlistentry>
@@ -744,14 +2316,14 @@ Debug information. The last DO_QUERY is an auth query.
<varlistentry id="DATASRC_QUERY_IS_GLUE">
<term>DATASRC_QUERY_IS_GLUE glue query (%1/%2)</term>
<listitem><para>
-Debug information. The last DO_QUERY is query for glue addresses.
+Debug information. The last DO_QUERY is a query for glue addresses.
</para></listitem>
</varlistentry>
<varlistentry id="DATASRC_QUERY_IS_NOGLUE">
<term>DATASRC_QUERY_IS_NOGLUE query for non-glue addresses (%1/%2)</term>
<listitem><para>
-Debug information. The last DO_QUERY is query for addresses that are not
+Debug information. The last DO_QUERY is a query for addresses that are not
glue.
</para></listitem>
</varlistentry>
@@ -759,7 +2331,7 @@ glue.
<varlistentry id="DATASRC_QUERY_IS_REF">
<term>DATASRC_QUERY_IS_REF query for referral (%1/%2)</term>
<listitem><para>
-Debug information. The last DO_QUERY is query for referral information.
+Debug information. The last DO_QUERY is a query for referral information.
</para></listitem>
</varlistentry>
@@ -806,7 +2378,7 @@ error already.
</varlistentry>
<varlistentry id="DATASRC_QUERY_NO_CACHE_ANY_AUTH">
-<term>DATASRC_QUERY_NO_CACHE_ANY_AUTH ignoring cache for ANY query (%1/%2 in %3 class)</term>
+<term>DATASRC_QUERY_NO_CACHE_ANY_AUTH ignoring hotspot cache for ANY query (%1/%2 in %3 class)</term>
<listitem><para>
Debug information. The hotspot cache is ignored for authoritative ANY queries
for consistency reasons.
@@ -814,7 +2386,7 @@ for consistency reasons.
</varlistentry>
<varlistentry id="DATASRC_QUERY_NO_CACHE_ANY_SIMPLE">
-<term>DATASRC_QUERY_NO_CACHE_ANY_SIMPLE ignoring cache for ANY query (%1/%2 in %3 class)</term>
+<term>DATASRC_QUERY_NO_CACHE_ANY_SIMPLE ignoring hotspot cache for ANY query (%1/%2 in %3 class)</term>
<listitem><para>
Debug information. The hotspot cache is ignored for ANY queries for consistency
reasons.
@@ -852,8 +2424,8 @@ Debug information. A sure query is being processed now.
</para></listitem>
</varlistentry>
-<varlistentry id="DATASRC_QUERY_PROVENX_FAIL">
-<term>DATASRC_QUERY_PROVENX_FAIL unable to prove nonexistence of '%1'</term>
+<varlistentry id="DATASRC_QUERY_PROVE_NX_FAIL">
+<term>DATASRC_QUERY_PROVE_NX_FAIL unable to prove nonexistence of '%1'</term>
<listitem><para>
The user wants DNSSEC and we discovered the entity doesn't exist (either
domain or the record). But there was an error getting NSEC/NSEC3 record
@@ -890,9 +2462,9 @@ error already.
<varlistentry id="DATASRC_QUERY_SYNTH_CNAME">
<term>DATASRC_QUERY_SYNTH_CNAME synthesizing CNAME from DNAME on '%1'</term>
<listitem><para>
-Debug information. While answering a query, a DNAME was met. The DNAME itself
-will be returned, but along with it a CNAME for clients which don't understand
-DNAMEs will be synthesized.
+This is a debug message. While answering a query, a DNAME was encountered. The
+DNAME itself will be returned, along with a synthesized CNAME for clients that
+do not understand the DNAME RR.
</para></listitem>
</varlistentry>
@@ -905,7 +2477,7 @@ already. The code is 1 for error, 2 for not implemented.
</varlistentry>
<varlistentry id="DATASRC_QUERY_TOO_MANY_CNAMES">
-<term>DATASRC_QUERY_TOO_MANY_CNAMES cNAME chain limit exceeded at '%1'</term>
+<term>DATASRC_QUERY_TOO_MANY_CNAMES CNAME chain limit exceeded at '%1'</term>
<listitem><para>
A CNAME led to another CNAME and it led to another, and so on. After 16
CNAMEs, the software gave up. Long CNAME chains are discouraged, and this
@@ -938,8 +2510,8 @@ exact kind was hopefully already reported.
</para></listitem>
</varlistentry>
-<varlistentry id="DATASRC_QUERY_WILDCARD_PROVENX_FAIL">
-<term>DATASRC_QUERY_WILDCARD_PROVENX_FAIL unable to prove nonexistence of '%1' (%2)</term>
+<varlistentry id="DATASRC_QUERY_WILDCARD_PROVE_NX_FAIL">
+<term>DATASRC_QUERY_WILDCARD_PROVE_NX_FAIL unable to prove nonexistence of '%1' (%2)</term>
<listitem><para>
While processing a wildcard, it wasn't possible to prove nonexistence of the
given domain or record. The code is 1 for error and 2 for not implemented.
@@ -961,32 +2533,53 @@ Debug information. The SQLite data source is closing the database file.
</para></listitem>
</varlistentry>
+<varlistentry id="DATASRC_SQLITE_CONNCLOSE">
+<term>DATASRC_SQLITE_CONNCLOSE Closing sqlite database</term>
+<listitem><para>
+The database file is no longer needed and is being closed.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_SQLITE_CONNOPEN">
+<term>DATASRC_SQLITE_CONNOPEN Opening sqlite database file '%1'</term>
+<listitem><para>
+The database file is being opened so it can start providing data.
+</para></listitem>
+</varlistentry>
+
<varlistentry id="DATASRC_SQLITE_CREATE">
-<term>DATASRC_SQLITE_CREATE sQLite data source created</term>
+<term>DATASRC_SQLITE_CREATE SQLite data source created</term>
<listitem><para>
Debug information. An instance of SQLite data source is being created.
</para></listitem>
</varlistentry>
<varlistentry id="DATASRC_SQLITE_DESTROY">
-<term>DATASRC_SQLITE_DESTROY sQLite data source destroyed</term>
+<term>DATASRC_SQLITE_DESTROY SQLite data source destroyed</term>
<listitem><para>
Debug information. An instance of SQLite data source is being destroyed.
</para></listitem>
</varlistentry>
+<varlistentry id="DATASRC_SQLITE_DROPCONN">
+<term>DATASRC_SQLITE_DROPCONN SQLite3Database is being deinitialized</term>
+<listitem><para>
+The object around a database connection is being destroyed.
+</para></listitem>
+</varlistentry>
+
<varlistentry id="DATASRC_SQLITE_ENCLOSURE">
<term>DATASRC_SQLITE_ENCLOSURE looking for zone containing '%1'</term>
<listitem><para>
-Debug information. The SQLite data source is trying to identify, which zone
+Debug information. The SQLite data source is trying to identify which zone
should hold this domain.
</para></listitem>
</varlistentry>
-<varlistentry id="DATASRC_SQLITE_ENCLOSURE_NOTFOUND">
-<term>DATASRC_SQLITE_ENCLOSURE_NOTFOUND no zone contains it</term>
+<varlistentry id="DATASRC_SQLITE_ENCLOSURE_NOT_FOUND">
+<term>DATASRC_SQLITE_ENCLOSURE_NOT_FOUND no zone contains '%1'</term>
<listitem><para>
-Debug information. The last SQLITE_ENCLOSURE query was unsuccessful, there's
+Debug information. The last SQLITE_ENCLOSURE query was unsuccessful; there's
no such zone in our data.
</para></listitem>
</varlistentry>
@@ -1050,7 +2643,7 @@ a referral and where it goes.
<varlistentry id="DATASRC_SQLITE_FINDREF_BAD_CLASS">
<term>DATASRC_SQLITE_FINDREF_BAD_CLASS class mismatch looking for referral ('%1' and '%2')</term>
<listitem><para>
-The SQLite data source was trying to identify, if there's a referral. But
+The SQLite data source was trying to identify if there's a referral. But
it contains different class than the query was for.
</para></listitem>
</varlistentry>
@@ -1079,6 +2672,13 @@ But it doesn't contain that zone.
</para></listitem>
</varlistentry>
+<varlistentry id="DATASRC_SQLITE_NEWCONN">
+<term>DATASRC_SQLITE_NEWCONN SQLite3Database is being initialized</term>
+<listitem><para>
+A wrapper object to hold database connection is being initialized.
+</para></listitem>
+</varlistentry>
+
<varlistentry id="DATASRC_SQLITE_OPEN">
<term>DATASRC_SQLITE_OPEN opening SQLite database '%1'</term>
<listitem><para>
@@ -1090,15 +2690,22 @@ the provided file.
<varlistentry id="DATASRC_SQLITE_PREVIOUS">
<term>DATASRC_SQLITE_PREVIOUS looking for name previous to '%1'</term>
<listitem><para>
-Debug information. We're trying to look up name preceding the supplied one.
+This is a debug message. The name given was not found, so the program
+is searching for the next name higher up the hierarchy (e.g. if
+www.example.com were queried for and not found, the software searches
+for the "previous" name, example.com).
</para></listitem>
</varlistentry>
<varlistentry id="DATASRC_SQLITE_PREVIOUS_NO_ZONE">
<term>DATASRC_SQLITE_PREVIOUS_NO_ZONE no zone containing '%1'</term>
<listitem><para>
-The SQLite data source tried to identify name preceding this one. But this
-one is not contained in any zone in the data source.
+The name given was not found, so the program is searching for the next
+name higher up the hierarchy (e.g. if www.example.com were queried
+for and not found, the software searches for the "previous" name,
+example.com). However, this name is not contained in any zone in the
+data source. This is an error since it indicates a problem in the earlier
+processing of the query.
</para></listitem>
</varlistentry>
@@ -1111,11 +2718,11 @@ no data, but it will be ready for use.
</para></listitem>
</varlistentry>
-<varlistentry id="DATASRC_STATIC_BAD_CLASS">
-<term>DATASRC_STATIC_BAD_CLASS static data source can handle CH only</term>
+<varlistentry id="DATASRC_STATIC_CLASS_NOT_CH">
+<term>DATASRC_STATIC_CLASS_NOT_CH static data source can handle CH class only</term>
<listitem><para>
-For some reason, someone asked the static data source a query that is not in
-the CH class.
+An error message indicating that a query requesting a RR for a class other
+that CH was sent to the static data source (which only handles CH queries).
</para></listitem>
</varlistentry>
@@ -1143,294 +2750,436 @@ generated.
</para></listitem>
</varlistentry>
-<varlistentry id="LOGIMPL_ABOVEDBGMAX">
-<term>LOGIMPL_ABOVEDBGMAX debug level of %1 is too high and will be set to the maximum of %2</term>
+<varlistentry id="LOGIMPL_ABOVE_MAX_DEBUG">
+<term>LOGIMPL_ABOVE_MAX_DEBUG debug level of %1 is too high and will be set to the maximum of %2</term>
<listitem><para>
-A message from the underlying logger implementation code, the debug level
-(as set by the string DEBGUGn) is above the maximum allowed value and has
-been reduced to that value.
+A message from the interface to the underlying logger implementation reporting
+that the debug level (as set by an internally-created string DEBUGn, where n
+is an integer, e.g. DEBUG22) is above the maximum allowed value and has
+been reduced to that value. The appearance of this message may indicate
+a programming error - please submit a bug report.
</para></listitem>
</varlistentry>
-<varlistentry id="LOGIMPL_BADDEBUG">
-<term>LOGIMPL_BADDEBUG debug string is '%1': must be of the form DEBUGn</term>
+<varlistentry id="LOGIMPL_BAD_DEBUG_STRING">
+<term>LOGIMPL_BAD_DEBUG_STRING debug string '%1' has invalid format</term>
<listitem><para>
-The string indicating the extended logging level (used by the underlying
-logger implementation code) is not of the stated form. In particular,
-it starts DEBUG but does not end with an integer.
+A message from the interface to the underlying logger implementation
+reporting that an internally-created string used to set the debug level
+is not of the correct format (it should be of the form DEBUGn, where n
+is an integer, e.g. DEBUG22). The appearance of this message indicates
+a programming error - please submit a bug report.
</para></listitem>
</varlistentry>
-<varlistentry id="LOGIMPL_BELOWDBGMIN">
-<term>LOGIMPL_BELOWDBGMIN debug level of %1 is too low and will be set to the minimum of %2</term>
+<varlistentry id="LOGIMPL_BELOW_MIN_DEBUG">
+<term>LOGIMPL_BELOW_MIN_DEBUG debug level of %1 is too low and will be set to the minimum of %2</term>
<listitem><para>
-A message from the underlying logger implementation code, the debug level
-(as set by the string DEBGUGn) is below the minimum allowed value and has
-been increased to that value.
+A message from the interface to the underlying logger implementation reporting
+that the debug level (as set by an internally-created string DEBUGn, where n
+is an integer, e.g. DEBUG22) is below the minimum allowed value and has
+been increased to that value. The appearance of this message may indicate
+a programming error - please submit a bug report.
</para></listitem>
</varlistentry>
-<varlistentry id="MSG_BADDESTINATION">
-<term>MSG_BADDESTINATION unrecognized log destination: %1</term>
+<varlistentry id="LOG_BAD_DESTINATION">
+<term>LOG_BAD_DESTINATION unrecognized log destination: %1</term>
<listitem><para>
A logger destination value was given that was not recognized. The
destination should be one of "console", "file", or "syslog".
</para></listitem>
</varlistentry>
-<varlistentry id="MSG_BADSEVERITY">
-<term>MSG_BADSEVERITY unrecognized log severity: %1</term>
+<varlistentry id="LOG_BAD_SEVERITY">
+<term>LOG_BAD_SEVERITY unrecognized log severity: %1</term>
<listitem><para>
A logger severity value was given that was not recognized. The severity
-should be one of "DEBUG", "INFO", "WARN", "ERROR", or "FATAL".
+should be one of "DEBUG", "INFO", "WARN", "ERROR", "FATAL" or "NONE".
</para></listitem>
</varlistentry>
-<varlistentry id="MSG_BADSTREAM">
-<term>MSG_BADSTREAM bad log console output stream: %1</term>
+<varlistentry id="LOG_BAD_STREAM">
+<term>LOG_BAD_STREAM bad log console output stream: %1</term>
<listitem><para>
-A log console output stream was given that was not recognized. The
-output stream should be one of "stdout", or "stderr"
+Logging has been configured so that output is written to the terminal
+(console) but the stream on which it is to be written is not recognised.
+Allowed values are "stdout" and "stderr".
</para></listitem>
</varlistentry>
-<varlistentry id="MSG_DUPLNS">
-<term>MSG_DUPLNS line %1: duplicate $NAMESPACE directive found</term>
+<varlistentry id="LOG_DUPLICATE_MESSAGE_ID">
+<term>LOG_DUPLICATE_MESSAGE_ID duplicate message ID (%1) in compiled code</term>
<listitem><para>
-When reading a message file, more than one $NAMESPACE directive was found. In
-this version of the code, such a condition is regarded as an error and the
-read will be abandoned.
+During start-up, BIND 10 detected that the given message identification
+had been defined multiple times in the BIND 10 code. This indicates a
+programming error; please submit a bug report.
</para></listitem>
</varlistentry>
-<varlistentry id="MSG_DUPMSGID">
-<term>MSG_DUPMSGID duplicate message ID (%1) in compiled code</term>
+<varlistentry id="LOG_DUPLICATE_NAMESPACE">
+<term>LOG_DUPLICATE_NAMESPACE line %1: duplicate $NAMESPACE directive found</term>
<listitem><para>
-Indicative of a programming error, when it started up, BIND10 detected that
-the given message ID had been registered by one or more modules. (All message
-IDs should be unique throughout BIND10.) This has no impact on the operation
-of the server other that erroneous messages may be logged. (When BIND10 loads
-the message IDs (and their associated text), if a duplicate ID is found it is
-discarded. However, when the module that supplied the duplicate ID logs that
-particular message, the text supplied by the module that added the original
-ID will be output - something that may bear no relation to the condition being
-logged.
+When reading a message file, more than one $NAMESPACE directive was found.
+(This directive is used to set a C++ namespace when generating header
+files during software development.) Such a condition is regarded as an
+error and the read will be abandoned.
</para></listitem>
</varlistentry>
-<varlistentry id="MSG_IDNOTFND">
-<term>MSG_IDNOTFND could not replace message text for '%1': no such message</term>
+<varlistentry id="LOG_INPUT_OPEN_FAIL">
+<term>LOG_INPUT_OPEN_FAIL unable to open message file %1 for input: %2</term>
+<listitem><para>
+The program was not able to open the specified input message file for
+the reason given.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="LOG_INVALID_MESSAGE_ID">
+<term>LOG_INVALID_MESSAGE_ID line %1: invalid message identification '%2'</term>
+<listitem><para>
+An invalid message identification (ID) has been found during the read of
+a message file. Message IDs should comprise only alphanumeric characters
+and the underscore, and should not start with a digit.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="LOG_NAMESPACE_EXTRA_ARGS">
+<term>LOG_NAMESPACE_EXTRA_ARGS line %1: $NAMESPACE directive has too many arguments</term>
+<listitem><para>
+The $NAMESPACE directive in a message file takes a single argument, a
+namespace in which all the generated symbol names are placed. This error
+is generated when the compiler finds a $NAMESPACE directive with more
+than one argument.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="LOG_NAMESPACE_INVALID_ARG">
+<term>LOG_NAMESPACE_INVALID_ARG line %1: $NAMESPACE directive has an invalid argument ('%2')</term>
+<listitem><para>
+The $NAMESPACE argument in a message file should be a valid C++ namespace.
+This message is output if the simple check on the syntax of the string
+carried out by the reader fails.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="LOG_NAMESPACE_NO_ARGS">
+<term>LOG_NAMESPACE_NO_ARGS line %1: no arguments were given to the $NAMESPACE directive</term>
+<listitem><para>
+The $NAMESPACE directive in a message file takes a single argument,
+a C++ namespace in which all the generated symbol names are placed.
+This error is generated when the compiler finds a $NAMESPACE directive
+with no arguments.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="LOG_NO_MESSAGE_ID">
+<term>LOG_NO_MESSAGE_ID line %1: message definition line found without a message ID</term>
+<listitem><para>
+Within a message file, messages are defined by lines starting with a "%".
+The rest of the line should comprise the message ID and text describing
+the message. This error indicates the message compiler found a line in
+the message file comprising just the "%" and nothing else.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="LOG_NO_MESSAGE_TEXT">
+<term>LOG_NO_MESSAGE_TEXT line %1: line found containing a message ID ('%2') and no text</term>
+<listitem><para>
+Within a message file, messages are defined by lines starting with a "%".
+The rest of the line should comprise the message ID and text describing
+the message. This error indicates the message compiler found a line
+in the message file comprising just the "%" and message identification,
+but no text.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="LOG_NO_SUCH_MESSAGE">
+<term>LOG_NO_SUCH_MESSAGE could not replace message text for '%1': no such message</term>
<listitem><para>
During start-up a local message file was read. A line with the listed
-message identification was found in the file, but the identification is not
-one contained in the compiled-in message dictionary. Either the message
-identification has been mis-spelled in the file, or the local file was used
-for an earlier version of the software and the message with that
-identification has been removed.
+message identification was found in the file, but the identification is
+not one contained in the compiled-in message dictionary. This message
+may appear a number of times in the file, once for every such unknown
+message identification.
</para><para>
-This message may appear a number of times in the file, once for every such
-unknown message identification.
+There may be several reasons why this message may appear:
+</para><para>
+- The message ID has been mis-spelled in the local message file.
+</para><para>
+- The program outputting the message may not use that particular message
+(e.g. it originates in a module not used by the program.)
+</para><para>
+- The local file was written for an earlier version of the BIND 10 software
+and the later version no longer generates that message.
+</para><para>
+Whatever the reason, there is no impact on the operation of BIND 10.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="LOG_OPEN_OUTPUT_FAIL">
+<term>LOG_OPEN_OUTPUT_FAIL unable to open %1 for output: %2</term>
+<listitem><para>
+Originating within the logging code, the program was not able to open
+the specified output file for the reason given.
</para></listitem>
</varlistentry>
-<varlistentry id="MSG_INVMSGID">
-<term>MSG_INVMSGID line %1: invalid message identification '%2'</term>
+<varlistentry id="LOG_PREFIX_EXTRA_ARGS">
+<term>LOG_PREFIX_EXTRA_ARGS line %1: $PREFIX directive has too many arguments</term>
<listitem><para>
-The concatenation of the prefix and the message identification is used as
-a symbol in the C++ module; as such it may only contain
+Within a message file, the $PREFIX directive takes a single argument,
+a prefix to be added to the symbol names when a C++ file is created.
+This error is generated when the compiler finds a $PREFIX directive with
+more than one argument.
+</para><para>
+Note: the $PREFIX directive is deprecated and will be removed in a future
+version of BIND 10.
</para></listitem>
</varlistentry>
-<varlistentry id="MSG_NOMSGID">
-<term>MSG_NOMSGID line %1: message definition line found without a message ID</term>
+<varlistentry id="LOG_PREFIX_INVALID_ARG">
+<term>LOG_PREFIX_INVALID_ARG line %1: $PREFIX directive has an invalid argument ('%2')</term>
<listitem><para>
-Message definition lines are lines starting with a "%". The rest of the line
-should comprise the message ID and text describing the message. This error
-indicates the message compiler found a line in the message file comprising
-just the "%" and nothing else.
+Within a message file, the $PREFIX directive takes a single argument,
+a prefix to be added to the symbol names when a C++ file is created.
+As such, it must adhere to restrictions on C++ symbol names (e.g. may
+only contain alphanumeric characters or underscores, and may not start
+with a digit). A $PREFIX directive was found with an argument (given
+in the message) that violates those restrictions.
+</para><para>
+Note: the $PREFIX directive is deprecated and will be removed in a future
+version of BIND 10.
</para></listitem>
</varlistentry>
-<varlistentry id="MSG_NOMSGTXT">
-<term>MSG_NOMSGTXT line %1: line found containing a message ID ('%2') and no text</term>
+<varlistentry id="LOG_READING_LOCAL_FILE">
+<term>LOG_READING_LOCAL_FILE reading local message file %1</term>
<listitem><para>
-Message definition lines are lines starting with a "%". The rest of the line
-should comprise the message ID and text describing the message. This error
-is generated when a line is found in the message file that contains the
-leading "%" and the message identification but no text.
+This is an informational message output by BIND 10 when it starts to read
+a local message file. (A local message file may replace the text of
+one or more messages; the ID of the message will not be changed though.)
</para></listitem>
</varlistentry>
-<varlistentry id="MSG_NSEXTRARG">
-<term>MSG_NSEXTRARG line %1: $NAMESPACE directive has too many arguments</term>
+<varlistentry id="LOG_READ_ERROR">
+<term>LOG_READ_ERROR error reading from message file %1: %2</term>
<listitem><para>
-The $NAMESPACE directive takes a single argument, a namespace in which all the
-generated symbol names are placed. This error is generated when the
-compiler finds a $NAMESPACE directive with more than one argument.
+The specified error was encountered reading from the named message file.
</para></listitem>
</varlistentry>
-<varlistentry id="MSG_NSINVARG">
-<term>MSG_NSINVARG line %1: $NAMESPACE directive has an invalid argument ('%2')</term>
+<varlistentry id="LOG_UNRECOGNISED_DIRECTIVE">
+<term>LOG_UNRECOGNISED_DIRECTIVE line %1: unrecognised directive '%2'</term>
<listitem><para>
-The $NAMESPACE argument should be a valid C++ namespace. The reader does a
-cursory check on its validity, checking that the characters in the namespace
-are correct. The error is generated when the reader finds an invalid
-character. (Valid are alphanumeric characters, underscores and colons.)
+Within a message file, a line starting with a dollar symbol was found
+(indicating the presence of a directive) but the first word on the line
+(shown in the message) was not recognised.
</para></listitem>
</varlistentry>
-<varlistentry id="MSG_NSNOARG">
-<term>MSG_NSNOARG line %1: no arguments were given to the $NAMESPACE directive</term>
+<varlistentry id="LOG_WRITE_ERROR">
+<term>LOG_WRITE_ERROR error writing to %1: %2</term>
<listitem><para>
-The $NAMESPACE directive takes a single argument, a namespace in which all the
-generated symbol names are placed. This error is generated when the
-compiler finds a $NAMESPACE directive with no arguments.
+The specified error was encountered by the message compiler when writing
+to the named output file.
</para></listitem>
</varlistentry>
-<varlistentry id="MSG_OPENIN">
-<term>MSG_OPENIN unable to open message file %1 for input: %2</term>
+<varlistentry id="NOTIFY_OUT_INVALID_ADDRESS">
+<term>NOTIFY_OUT_INVALID_ADDRESS invalid address %1#%2: %3</term>
<listitem><para>
-The program was not able to open the specified input message file for the
-reason given.
+The notify_out library tried to send a notify message to the given
+address, but it appears to be an invalid address. The configuration
+for secondary nameservers might contain a typographic error, or a
+different BIND 10 module has forgotten to validate its data before
+sending this module a notify command. As such, this should normally
+not happen, and points to an oversight in a different module.
</para></listitem>
</varlistentry>
-<varlistentry id="MSG_OPENOUT">
-<term>MSG_OPENOUT unable to open %1 for output: %2</term>
+<varlistentry id="NOTIFY_OUT_REPLY_BAD_OPCODE">
+<term>NOTIFY_OUT_REPLY_BAD_OPCODE bad opcode in notify reply from %1#%2: %3</term>
<listitem><para>
-The program was not able to open the specified output file for the reason
-given.
+The notify_out library sent a notify message to the nameserver at
+the given address, but the response did not have the opcode set to
+NOTIFY. The opcode in the response is printed. Since there was a
+response, no more notifies will be sent to this server for this
+notification event.
</para></listitem>
</varlistentry>
-<varlistentry id="MSG_PRFEXTRARG">
-<term>MSG_PRFEXTRARG line %1: $PREFIX directive has too many arguments</term>
+<varlistentry id="NOTIFY_OUT_REPLY_BAD_QID">
+<term>NOTIFY_OUT_REPLY_BAD_QID bad QID in notify reply from %1#%2: got %3, should be %4</term>
<listitem><para>
-The $PREFIX directive takes a single argument, a prefix to be added to the
-symbol names when a C++ .h file is created. This error is generated when the
-compiler finds a $PREFIX directive with more than one argument.
+The notify_out library sent a notify message to the nameserver at
+the given address, but the query id in the response does not match
+the one we sent. Since there was a response, no more notifies will
+be sent to this server for this notification event.
</para></listitem>
</varlistentry>
-<varlistentry id="MSG_PRFINVARG">
-<term>MSG_PRFINVARG line %1: $PREFIX directive has an invalid argument ('%2')</term>
+<varlistentry id="NOTIFY_OUT_REPLY_BAD_QUERY_NAME">
+<term>NOTIFY_OUT_REPLY_BAD_QUERY_NAME bad query name in notify reply from %1#%2: got %3, should be %4</term>
<listitem><para>
-The $PREFIX argument is used in a symbol name in a C++ header file. As such,
-it must adhere to restrictions on C++ symbol names (e.g. may only contain
-alphanumeric characters or underscores, and may nor start with a digit).
-A $PREFIX directive was found with an argument (given in the message) that
-violates those restictions.
+The notify_out library sent a notify message to the nameserver at
+the given address, but the query name in the response does not match
+the one we sent. Since there was a response, no more notifies will
+be sent to this server for this notification event.
</para></listitem>
</varlistentry>
-<varlistentry id="MSG_RDLOCMES">
-<term>MSG_RDLOCMES reading local message file %1</term>
+<varlistentry id="NOTIFY_OUT_REPLY_QR_NOT_SET">
+<term>NOTIFY_OUT_REPLY_QR_NOT_SET QR flags set to 0 in reply to notify from %1#%2</term>
<listitem><para>
-This is an informational message output by BIND10 when it starts to read a
-local message file. (A local message file may replace the text of one of more
-messages; the ID of the message will not be changed though.)
+The notify_out library sent a notify message to the nameserver at the
+given address, but the reply did not have the QR bit set to one.
+Since there was a response, no more notifies will be sent to this
+server for this notification event.
</para></listitem>
</varlistentry>
-<varlistentry id="MSG_READERR">
-<term>MSG_READERR error reading from message file %1: %2</term>
+<varlistentry id="NOTIFY_OUT_REPLY_UNCAUGHT_EXCEPTION">
+<term>NOTIFY_OUT_REPLY_UNCAUGHT_EXCEPTION uncaught exception: %1</term>
<listitem><para>
-The specified error was encountered reading from the named message file.
+There was an uncaught exception in the handling of a notify reply
+message, either in the message parser, or while trying to extract data
+from the parsed message. The error is printed, and notify_out will
+treat the response as a bad message, but this does point to a
+programming error, since all exceptions should have been caught
+explicitly. Please file a bug report. Since there was a response,
+no more notifies will be sent to this server for this notification
+event.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="NOTIFY_OUT_RETRY_EXCEEDED">
+<term>NOTIFY_OUT_RETRY_EXCEEDED notify to %1#%2: number of retries (%3) exceeded</term>
+<listitem><para>
+The maximum number of retries for the notify target has been exceeded.
+Either the address of the secondary nameserver is wrong, or it is not
+responding.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="NOTIFY_OUT_SENDING_NOTIFY">
+<term>NOTIFY_OUT_SENDING_NOTIFY sending notify to %1#%2</term>
+<listitem><para>
+A notify message is sent to the secondary nameserver at the given
+address.
</para></listitem>
</varlistentry>
-<varlistentry id="MSG_UNRECDIR">
-<term>MSG_UNRECDIR line %1: unrecognised directive '%2'</term>
+<varlistentry id="NOTIFY_OUT_SOCKET_ERROR">
+<term>NOTIFY_OUT_SOCKET_ERROR socket error sending notify to %1#%2: %3</term>
<listitem><para>
-A line starting with a dollar symbol was found, but the first word on the line
-(shown in the message) was not a recognised message compiler directive.
+There was a network error while trying to send a notify message to
+the given address. The address might be unreachable. The socket
+error is printed and should provide more information.
</para></listitem>
</varlistentry>
-<varlistentry id="MSG_WRITERR">
-<term>MSG_WRITERR error writing to %1: %2</term>
+<varlistentry id="NOTIFY_OUT_SOCKET_RECV_ERROR">
+<term>NOTIFY_OUT_SOCKET_RECV_ERROR socket error reading notify reply from %1#%2: %3</term>
<listitem><para>
-The specified error was encountered by the message compiler when writing to
-the named output file.
+There was a network error while trying to read a notify reply
+message from the given address. The socket error is printed and should
+provide more information.
</para></listitem>
</varlistentry>
-<varlistentry id="NSAS_INVRESPSTR">
-<term>NSAS_INVRESPSTR queried for %1 but got invalid response</term>
+<varlistentry id="NOTIFY_OUT_TIMEOUT">
+<term>NOTIFY_OUT_TIMEOUT retry notify to %1#%2</term>
<listitem><para>
-This message indicates an internal error in the nameserver address store
-component (NSAS) of the resolver. The NSAS made a query for a RR for the
-specified nameserver but received an invalid response. Either the success
-function was called without a DNS message or the message was invalid on some
-way. (In the latter case, the error should have been picked up elsewhere in
-the processing logic, hence the raising of the error here.)
+The notify message to the given address (noted as address#port) has
+timed out, and the message will be resent until the max retry limit
+is reached.
</para></listitem>
</varlistentry>
-<varlistentry id="NSAS_INVRESPTC">
-<term>NSAS_INVRESPTC queried for %1 RR of type/class %2/%3, received response %4/%5</term>
+<varlistentry id="NSAS_FIND_NS_ADDRESS">
+<term>NSAS_FIND_NS_ADDRESS asking resolver to obtain A and AAAA records for %1</term>
<listitem><para>
-This message indicates an internal error in the nameserver address store
-component (NSAS) of the resolver. The NSAS made a query for the given RR
-type and class, but instead received an answer with the given type and class.
+A debug message issued when the NSAS (nameserver address store - part
+of the resolver) is making a callback into the resolver to retrieve the
+address records for the specified nameserver.
</para></listitem>
</varlistentry>
-<varlistentry id="NSAS_LOOKUPCANCEL">
-<term>NSAS_LOOKUPCANCEL lookup for zone %1 has been cancelled</term>
+<varlistentry id="NSAS_FOUND_ADDRESS">
+<term>NSAS_FOUND_ADDRESS found address %1 for %2</term>
<listitem><para>
-A debug message, this is output when a NSAS (nameserver address store -
-part of the resolver) lookup for a zone has been cancelled.
+A debug message issued when the NSAS (nameserver address store - part
+of the resolver) has retrieved the given address for the specified
+nameserver through an external query.
</para></listitem>
</varlistentry>
-<varlistentry id="NSAS_LOOKUPZONE">
-<term>NSAS_LOOKUPZONE searching NSAS for nameservers for zone %1</term>
+<varlistentry id="NSAS_INVALID_RESPONSE">
+<term>NSAS_INVALID_RESPONSE queried for %1 but got invalid response</term>
<listitem><para>
-A debug message, this is output when a call is made to the nameserver address
-store (part of the resolver) to obtain the nameservers for the specified zone.
+The NSAS (nameserver address store - part of the resolver) made a query
+for a RR for the specified nameserver but received an invalid response.
+Either the success function was called without a DNS message or the
+message was invalid in some way. (In the latter case, the error should
+have been picked up elsewhere in the processing logic, hence the raising
+of the error here.)
+</para><para>
+This message indicates an internal error in the NSAS. Please raise a
+bug report.
</para></listitem>
</varlistentry>
-<varlistentry id="NSAS_NSADDR">
-<term>NSAS_NSADDR asking resolver to obtain A and AAAA records for %1</term>
+<varlistentry id="NSAS_LOOKUP_CANCEL">
+<term>NSAS_LOOKUP_CANCEL lookup for zone %1 has been canceled</term>
<listitem><para>
-A debug message, the NSAS (nameserver address store - part of the resolver) is
-making a callback into the resolver to retrieve the address records for the
-specified nameserver.
+A debug message issued when an NSAS (nameserver address store - part of
+the resolver) lookup for a zone has been canceled.
</para></listitem>
</varlistentry>
-<varlistentry id="NSAS_NSLKUPFAIL">
-<term>NSAS_NSLKUPFAIL failed to lookup any %1 for %2</term>
+<varlistentry id="NSAS_NS_LOOKUP_FAIL">
+<term>NSAS_NS_LOOKUP_FAIL failed to lookup any %1 for %2</term>
<listitem><para>
-A debug message, the NSAS (nameserver address store - part of the resolver)
-has been unable to retrieve the specified resource record for the specified
-nameserver. This is not necessarily a problem - the nameserver may be
-unreachable, in which case the NSAS will try other nameservers in the zone.
+A debug message issued when the NSAS (nameserver address store - part of
+the resolver) has been unable to retrieve the specified resource record
+for the specified nameserver. This is not necessarily a problem - the
+nameserver may be unreachable, in which case the NSAS will try other
+nameservers in the zone.
</para></listitem>
</varlistentry>
-<varlistentry id="NSAS_NSLKUPSUCC">
-<term>NSAS_NSLKUPSUCC found address %1 for %2</term>
+<varlistentry id="NSAS_SEARCH_ZONE_NS">
+<term>NSAS_SEARCH_ZONE_NS searching NSAS for nameservers for zone %1</term>
<listitem><para>
-A debug message, the NSAS (nameserver address store - part of the resolver)
-has retrieved the given address for the specified nameserver through an
-external query.
+A debug message output when a call is made to the NSAS (nameserver
+address store - part of the resolver) to obtain the nameservers for
+the specified zone.
</para></listitem>
</varlistentry>
-<varlistentry id="NSAS_SETRTT">
-<term>NSAS_SETRTT reporting RTT for %1 as %2; new value is now %3</term>
+<varlistentry id="NSAS_UPDATE_RTT">
+<term>NSAS_UPDATE_RTT update RTT for %1: was %2 ms, is now %3 ms</term>
<listitem><para>
A NSAS (nameserver address store - part of the resolver) debug message
-reporting the round-trip time (RTT) for a query made to the specified
-nameserver. The RTT has been updated using the value given and the new RTT is
-displayed. (The RTT is subject to a calculation that damps out sudden
-changes. As a result, the new RTT is not necessarily equal to the RTT
-reported.)
+reporting the update of a round-trip time (RTT) for a query made to the
+specified nameserver. The RTT has been updated using the value given
+and the new RTT is displayed. (The RTT is subject to a calculation that
+damps out sudden changes. As a result, the new RTT used by the NSAS in
+future decisions of which nameserver to use is not necessarily equal to
+the RTT reported.)
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="NSAS_WRONG_ANSWER">
+<term>NSAS_WRONG_ANSWER queried for %1 RR of type/class %2/%3, received response %4/%5</term>
+<listitem><para>
+A NSAS (nameserver address store - part of the resolver) made a query for
+a resource record of a particular type and class, but instead received
+an answer with a different type and class (given in the message).
+</para><para>
+This message indicates an internal error in the NSAS. Please raise a
+bug report.
</para></listitem>
</varlistentry>
@@ -1460,16 +3209,16 @@ type> tuple in the cache; instead, the deepest delegation found is indicated.
</para></listitem>
</varlistentry>
-<varlistentry id="RESLIB_FOLLOWCNAME">
-<term>RESLIB_FOLLOWCNAME following CNAME chain to <%1></term>
+<varlistentry id="RESLIB_FOLLOW_CNAME">
+<term>RESLIB_FOLLOW_CNAME following CNAME chain to <%1></term>
<listitem><para>
A debug message, a CNAME response was received and another query is being issued
for the <name, class, type> tuple.
</para></listitem>
</varlistentry>
-<varlistentry id="RESLIB_LONGCHAIN">
-<term>RESLIB_LONGCHAIN CNAME received in response to query for <%1>: CNAME chain length exceeded</term>
+<varlistentry id="RESLIB_LONG_CHAIN">
+<term>RESLIB_LONG_CHAIN CNAME received in response to query for <%1>: CNAME chain length exceeded</term>
<listitem><para>
A debug message recording that a CNAME response has been received to an upstream
query for the specified question (Previous debug messages will have indicated
@@ -1479,26 +3228,26 @@ is where one CNAME points to another) and so an error is being returned.
</para></listitem>
</varlistentry>
-<varlistentry id="RESLIB_NONSRRSET">
-<term>RESLIB_NONSRRSET no NS RRSet in referral response received to query for <%1></term>
+<varlistentry id="RESLIB_NO_NS_RRSET">
+<term>RESLIB_NO_NS_RRSET no NS RRSet in referral response received to query for <%1></term>
<listitem><para>
A debug message, this indicates that a response was received for the specified
-query and was categorised as a referral. However, the received message did
+query and was categorized as a referral. However, the received message did
not contain any NS RRsets. This may indicate a programming error in the
response classification code.
</para></listitem>
</varlistentry>
-<varlistentry id="RESLIB_NSASLOOK">
-<term>RESLIB_NSASLOOK looking up nameserver for zone %1 in the NSAS</term>
+<varlistentry id="RESLIB_NSAS_LOOKUP">
+<term>RESLIB_NSAS_LOOKUP looking up nameserver for zone %1 in the NSAS</term>
<listitem><para>
A debug message, the RunningQuery object is querying the NSAS for the
nameservers for the specified zone.
</para></listitem>
</varlistentry>
-<varlistentry id="RESLIB_NXDOMRR">
-<term>RESLIB_NXDOMRR NXDOMAIN/NXRRSET received in response to query for <%1></term>
+<varlistentry id="RESLIB_NXDOM_NXRR">
+<term>RESLIB_NXDOM_NXRR NXDOMAIN/NXRRSET received in response to query for <%1></term>
<listitem><para>
A debug message recording that either a NXDOMAIN or an NXRRSET response has
been received to an upstream query for the specified question. Previous debug
@@ -1514,8 +3263,8 @@ are no retries left, an error will be reported.
</para></listitem>
</varlistentry>
-<varlistentry id="RESLIB_PROTOCOLRTRY">
-<term>RESLIB_PROTOCOLRTRY protocol error in answer for %1: %2 (retries left: %3)</term>
+<varlistentry id="RESLIB_PROTOCOL_RETRY">
+<term>RESLIB_PROTOCOL_RETRY protocol error in answer for %1: %2 (retries left: %3)</term>
<listitem><para>
A debug message indicating that a protocol error was received and that
the resolver is repeating the query to the same nameserver. After this
@@ -1523,33 +3272,16 @@ repeated query, there will be the indicated number of retries left.
</para></listitem>
</varlistentry>
-<varlistentry id="RESLIB_RCODERR">
-<term>RESLIB_RCODERR RCODE indicates error in response to query for <%1></term>
+<varlistentry id="RESLIB_RCODE_ERR">
+<term>RESLIB_RCODE_ERR RCODE indicates error in response to query for <%1></term>
<listitem><para>
A debug message, the response to the specified query indicated an error
that is not covered by a specific code path. A SERVFAIL will be returned.
</para></listitem>
</varlistentry>
-<varlistentry id="RESLIB_REFERRAL">
-<term>RESLIB_REFERRAL referral received in response to query for <%1></term>
-<listitem><para>
-A debug message recording that a referral response has been received to an
-upstream query for the specified question. Previous debug messages will
-have indicated the server to which the question was sent.
-</para></listitem>
-</varlistentry>
-
-<varlistentry id="RESLIB_REFERZONE">
-<term>RESLIB_REFERZONE referred to zone %1</term>
-<listitem><para>
-A debug message indicating that the last referral message was to the specified
-zone.
-</para></listitem>
-</varlistentry>
-
-<varlistentry id="RESLIB_RESCAFND">
-<term>RESLIB_RESCAFND found <%1> in the cache (resolve() instance %2)</term>
+<varlistentry id="RESLIB_RECQ_CACHE_FIND">
+<term>RESLIB_RECQ_CACHE_FIND found <%1> in the cache (resolve() instance %2)</term>
<listitem><para>
This is a debug message and indicates that a RecursiveQuery object found
the specified <name, class, type> tuple in the cache. The instance number
@@ -1558,8 +3290,8 @@ been called.
</para></listitem>
</varlistentry>
-<varlistentry id="RESLIB_RESCANOTFND">
-<term>RESLIB_RESCANOTFND did not find <%1> in the cache, starting RunningQuery (resolve() instance %2)</term>
+<varlistentry id="RESLIB_RECQ_CACHE_NO_FIND">
+<term>RESLIB_RECQ_CACHE_NO_FIND did not find <%1> in the cache, starting RunningQuery (resolve() instance %2)</term>
<listitem><para>
This is a debug message and indicates that the look in the cache made by the
RecursiveQuery::resolve() method did not find an answer, so a new RunningQuery
@@ -1569,6 +3301,23 @@ been called.
</para></listitem>
</varlistentry>
+<varlistentry id="RESLIB_REFERRAL">
+<term>RESLIB_REFERRAL referral received in response to query for <%1></term>
+<listitem><para>
+A debug message recording that a referral response has been received to an
+upstream query for the specified question. Previous debug messages will
+have indicated the server to which the question was sent.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESLIB_REFER_ZONE">
+<term>RESLIB_REFER_ZONE referred to zone %1</term>
+<listitem><para>
+A debug message indicating that the last referral message was to the specified
+zone.
+</para></listitem>
+</varlistentry>
+
<varlistentry id="RESLIB_RESOLVE">
<term>RESLIB_RESOLVE asked to resolve <%1> (resolve() instance %2)</term>
<listitem><para>
@@ -1579,8 +3328,8 @@ message indicates which of the two resolve() methods has been called.
</para></listitem>
</varlistentry>
-<varlistentry id="RESLIB_RRSETFND">
-<term>RESLIB_RRSETFND found single RRset in the cache when querying for <%1> (resolve() instance %2)</term>
+<varlistentry id="RESLIB_RRSET_FOUND">
+<term>RESLIB_RRSET_FOUND found single RRset in the cache when querying for <%1> (resolve() instance %2)</term>
<listitem><para>
A debug message, indicating that when RecursiveQuery::resolve queried the
cache, a single RRset was found which was put in the answer. The instance
@@ -1596,16 +3345,16 @@ A debug message giving the round-trip time of the last query and response.
</para></listitem>
</varlistentry>
-<varlistentry id="RESLIB_RUNCAFND">
-<term>RESLIB_RUNCAFND found <%1> in the cache</term>
+<varlistentry id="RESLIB_RUNQ_CACHE_FIND">
+<term>RESLIB_RUNQ_CACHE_FIND found <%1> in the cache</term>
<listitem><para>
This is a debug message and indicates that a RunningQuery object found
the specified <name, class, type> tuple in the cache.
</para></listitem>
</varlistentry>
-<varlistentry id="RESLIB_RUNCALOOK">
-<term>RESLIB_RUNCALOOK looking up up <%1> in the cache</term>
+<varlistentry id="RESLIB_RUNQ_CACHE_LOOKUP">
+<term>RESLIB_RUNQ_CACHE_LOOKUP looking up <%1> in the cache</term>
<listitem><para>
This is a debug message and indicates that a RunningQuery object has made
a call to its doLookup() method to look up the specified <name, class, type>
@@ -1613,16 +3362,16 @@ tuple, the first action of which will be to examine the cache.
</para></listitem>
</varlistentry>
-<varlistentry id="RESLIB_RUNQUFAIL">
-<term>RESLIB_RUNQUFAIL failure callback - nameservers are unreachable</term>
+<varlistentry id="RESLIB_RUNQ_FAIL">
+<term>RESLIB_RUNQ_FAIL failure callback - nameservers are unreachable</term>
<listitem><para>
A debug message indicating that a RunningQuery's failure callback has been
called because all nameservers for the zone in question are unreachable.
</para></listitem>
</varlistentry>
-<varlistentry id="RESLIB_RUNQUSUCC">
-<term>RESLIB_RUNQUSUCC success callback - sending query to %1</term>
+<varlistentry id="RESLIB_RUNQ_SUCCESS">
+<term>RESLIB_RUNQ_SUCCESS success callback - sending query to %1</term>
<listitem><para>
A debug message indicating that a RunningQuery's success callback has been
called because a nameserver has been found, and that a query is being sent
@@ -1630,19 +3379,19 @@ to the specified nameserver.
</para></listitem>
</varlistentry>
-<varlistentry id="RESLIB_TESTSERV">
-<term>RESLIB_TESTSERV setting test server to %1(%2)</term>
+<varlistentry id="RESLIB_TEST_SERVER">
+<term>RESLIB_TEST_SERVER setting test server to %1(%2)</term>
<listitem><para>
-This is an internal debugging message and is only generated in unit tests.
-It indicates that all upstream queries from the resolver are being routed to
-the specified server, regardless of the address of the nameserver to which
-the query would normally be routed. As it should never be seen in normal
-operation, it is a warning message instead of a debug message.
+This is a warning message only generated in unit tests. It indicates
+that all upstream queries from the resolver are being routed to the
+specified server, regardless of the address of the nameserver to which
+the query would normally be routed. If seen during normal operation,
+please submit a bug report.
</para></listitem>
</varlistentry>
-<varlistentry id="RESLIB_TESTUPSTR">
-<term>RESLIB_TESTUPSTR sending upstream query for <%1> to test server at %2</term>
+<varlistentry id="RESLIB_TEST_UPSTREAM">
+<term>RESLIB_TEST_UPSTREAM sending upstream query for <%1> to test server at %2</term>
<listitem><para>
This is a debug message and should only be seen in unit tests. A query for
the specified <name, class, type> tuple is being sent to a test nameserver
@@ -1653,13 +3402,13 @@ whose address is given in the message.
<varlistentry id="RESLIB_TIMEOUT">
<term>RESLIB_TIMEOUT query <%1> to %2 timed out</term>
<listitem><para>
-A debug message indicating that the specified query has timed out and as
-there are no retries left, an error will be reported.
+A debug message indicating that the specified upstream query has timed out and
+there are no retries left.
</para></listitem>
</varlistentry>
-<varlistentry id="RESLIB_TIMEOUTRTRY">
-<term>RESLIB_TIMEOUTRTRY query <%1> to %2 timed out, re-trying (retries left: %3)</term>
+<varlistentry id="RESLIB_TIMEOUT_RETRY">
+<term>RESLIB_TIMEOUT_RETRY query <%1> to %2 timed out, re-trying (retries left: %3)</term>
<listitem><para>
A debug message indicating that the specified query has timed out and that
the resolver is repeating the query to the same nameserver. After this
@@ -1685,308 +3434,374 @@ tuple is being sent to a nameserver whose address is given in the message.
</para></listitem>
</varlistentry>
-<varlistentry id="RESOLVER_AXFRTCP">
-<term>RESOLVER_AXFRTCP AXFR request received over TCP</term>
+<varlistentry id="RESOLVER_AXFR_TCP">
+<term>RESOLVER_AXFR_TCP AXFR request received over TCP</term>
<listitem><para>
-A debug message, the resolver received a NOTIFY message over TCP. The server
-cannot process it and will return an error message to the sender with the
-RCODE set to NOTIMP.
+This is a debug message output when the resolver received a request for
+an AXFR (full transfer of a zone) over TCP. Only authoritative servers
+are able to handle AXFR requests, so the resolver will return an error
+message to the sender with the RCODE set to NOTIMP.
</para></listitem>
</varlistentry>
-<varlistentry id="RESOLVER_AXFRUDP">
-<term>RESOLVER_AXFRUDP AXFR request received over UDP</term>
+<varlistentry id="RESOLVER_AXFR_UDP">
+<term>RESOLVER_AXFR_UDP AXFR request received over UDP</term>
<listitem><para>
-A debug message, the resolver received a NOTIFY message over UDP. The server
-cannot process it (and in any case, an AXFR request should be sent over TCP)
-and will return an error message to the sender with the RCODE set to FORMERR.
+This is a debug message output when the resolver received a request for
+an AXFR (full transfer of a zone) over UDP. Only authoritative servers
+are able to handle AXFR requests (and in any case, an AXFR request should
+be sent over TCP), so the resolver will return an error message to the
+sender with the RCODE set to FORMERR.
</para></listitem>
</varlistentry>
-<varlistentry id="RESOLVER_CLTMOSMALL">
-<term>RESOLVER_CLTMOSMALL client timeout of %1 is too small</term>
+<varlistentry id="RESOLVER_CLIENT_TIME_SMALL">
+<term>RESOLVER_CLIENT_TIME_SMALL client timeout of %1 is too small</term>
<listitem><para>
-An error indicating that the configuration value specified for the query
-timeout is too small.
+During the update of the resolver's configuration parameters, the value
+of the client timeout was found to be too small. The configuration
+update was abandoned and the parameters were not changed.
</para></listitem>
</varlistentry>
-<varlistentry id="RESOLVER_CONFIGCHAN">
-<term>RESOLVER_CONFIGCHAN configuration channel created</term>
+<varlistentry id="RESOLVER_CONFIG_CHANNEL">
+<term>RESOLVER_CONFIG_CHANNEL configuration channel created</term>
<listitem><para>
-A debug message, output when the resolver has successfully established a
-connection to the configuration channel.
+This is a debug message output when the resolver has successfully
+established a connection to the configuration channel.
</para></listitem>
</varlistentry>
-<varlistentry id="RESOLVER_CONFIGERR">
-<term>RESOLVER_CONFIGERR error in configuration: %1</term>
+<varlistentry id="RESOLVER_CONFIG_ERROR">
+<term>RESOLVER_CONFIG_ERROR error in configuration: %1</term>
<listitem><para>
-An error was detected in a configuration update received by the resolver. This
-may be in the format of the configuration message (in which case this is a
-programming error) or it may be in the data supplied (in which case it is
-a user error). The reason for the error, given as a parameter in the message,
-will give more details.
+An error was detected in a configuration update received by the
+resolver. This may be in the format of the configuration message (in
+which case this is a programming error) or it may be in the data supplied
+(in which case it is a user error). The reason for the error, included
+in the message, will give more details. The configuration update is
+not applied and the resolver parameters were not changed.
</para></listitem>
</varlistentry>
-<varlistentry id="RESOLVER_CONFIGLOAD">
-<term>RESOLVER_CONFIGLOAD configuration loaded</term>
+<varlistentry id="RESOLVER_CONFIG_LOADED">
+<term>RESOLVER_CONFIG_LOADED configuration loaded</term>
<listitem><para>
-A debug message, output when the resolver configuration has been successfully
-loaded.
+This is a debug message output when the resolver configuration has been
+successfully loaded.
</para></listitem>
</varlistentry>
-<varlistentry id="RESOLVER_CONFIGUPD">
-<term>RESOLVER_CONFIGUPD configuration updated: %1</term>
+<varlistentry id="RESOLVER_CONFIG_UPDATED">
+<term>RESOLVER_CONFIG_UPDATED configuration updated: %1</term>
<listitem><para>
-A debug message, the configuration has been updated with the specified
-information.
+This is a debug message output when the resolver configuration is being
+updated with the specified information.
</para></listitem>
</varlistentry>
<varlistentry id="RESOLVER_CREATED">
<term>RESOLVER_CREATED main resolver object created</term>
<listitem><para>
-A debug message, output when the Resolver() object has been created.
+This is a debug message indicating that the main resolver object has
+been created.
</para></listitem>
</varlistentry>
-<varlistentry id="RESOLVER_DNSMSGRCVD">
-<term>RESOLVER_DNSMSGRCVD DNS message received: %1</term>
+<varlistentry id="RESOLVER_DNS_MESSAGE_RECEIVED">
+<term>RESOLVER_DNS_MESSAGE_RECEIVED DNS message received: %1</term>
<listitem><para>
-A debug message, this always precedes some other logging message and is the
-formatted contents of the DNS packet that the other message refers to.
+This is a debug message from the resolver listing the contents of a
+received DNS message.
</para></listitem>
</varlistentry>
-<varlistentry id="RESOLVER_DNSMSGSENT">
-<term>RESOLVER_DNSMSGSENT DNS message of %1 bytes sent: %2</term>
+<varlistentry id="RESOLVER_DNS_MESSAGE_SENT">
+<term>RESOLVER_DNS_MESSAGE_SENT DNS message of %1 bytes sent: %2</term>
<listitem><para>
-A debug message, this contains details of the response sent back to the querying
-system.
+This is a debug message containing details of the response returned by
+the resolver to the querying system.
</para></listitem>
</varlistentry>
<varlistentry id="RESOLVER_FAILED">
<term>RESOLVER_FAILED resolver failed, reason: %1</term>
<listitem><para>
-This is an error message output when an unhandled exception is caught by the
-resolver. All it can do is to shut down.
+This is an error message output when an unhandled exception is caught
+by the resolver. After this, the resolver will shut itself down.
+Please submit a bug report.
</para></listitem>
</varlistentry>
-<varlistentry id="RESOLVER_FWDADDR">
-<term>RESOLVER_FWDADDR setting forward address %1(%2)</term>
+<varlistentry id="RESOLVER_FORWARD_ADDRESS">
+<term>RESOLVER_FORWARD_ADDRESS setting forward address %1(%2)</term>
<listitem><para>
-This message may appear multiple times during startup, and it lists the
-forward addresses used by the resolver when running in forwarding mode.
+If the resolver is running in forward mode, this message will appear
+during startup to list the forward address. If multiple addresses are
+specified, it will appear once for each address.
</para></listitem>
</varlistentry>
-<varlistentry id="RESOLVER_FWDQUERY">
-<term>RESOLVER_FWDQUERY processing forward query</term>
+<varlistentry id="RESOLVER_FORWARD_QUERY">
+<term>RESOLVER_FORWARD_QUERY processing forward query</term>
<listitem><para>
-The received query has passed all checks and is being forwarded to upstream
+This is a debug message indicating that a query received by the resolver
+has passed a set of checks (message is well-formed, it is allowed by the
+ACL, it is a supported opcode, etc.) and is being forwarded to upstream
servers.
</para></listitem>
</varlistentry>
-<varlistentry id="RESOLVER_HDRERR">
-<term>RESOLVER_HDRERR message received, exception when processing header: %1</term>
+<varlistentry id="RESOLVER_HEADER_ERROR">
+<term>RESOLVER_HEADER_ERROR message received, exception when processing header: %1</term>
<listitem><para>
-A debug message noting that an exception occurred during the processing of
-a received packet. The packet has been dropped.
+This is a debug message from the resolver noting that an exception
+occurred during the processing of a received packet. The packet has
+been dropped.
</para></listitem>
</varlistentry>
<varlistentry id="RESOLVER_IXFR">
<term>RESOLVER_IXFR IXFR request received</term>
<listitem><para>
-The resolver received a NOTIFY message over TCP. The server cannot process it
-and will return an error message to the sender with the RCODE set to NOTIMP.
+This is a debug message indicating that the resolver received a request
+for an IXFR (incremental transfer of a zone). Only authoritative servers
+are able to handle IXFR requests, so the resolver will return an error
+message to the sender with the RCODE set to NOTIMP.
</para></listitem>
</varlistentry>
-<varlistentry id="RESOLVER_LKTMOSMALL">
-<term>RESOLVER_LKTMOSMALL lookup timeout of %1 is too small</term>
+<varlistentry id="RESOLVER_LOOKUP_TIME_SMALL">
+<term>RESOLVER_LOOKUP_TIME_SMALL lookup timeout of %1 is too small</term>
<listitem><para>
-An error indicating that the configuration value specified for the lookup
-timeout is too small.
+During the update of the resolver's configuration parameters, the value
+of the lookup timeout was found to be too small. The configuration
+update will not be applied.
</para></listitem>
</varlistentry>
-<varlistentry id="RESOLVER_NFYNOTAUTH">
-<term>RESOLVER_NFYNOTAUTH NOTIFY arrived but server is not authoritative</term>
+<varlistentry id="RESOLVER_MESSAGE_ERROR">
+<term>RESOLVER_MESSAGE_ERROR error parsing received message: %1 - returning %2</term>
<listitem><para>
-The resolver received a NOTIFY message. As the server is not authoritative it
-cannot process it, so it returns an error message to the sender with the RCODE
-set to NOTAUTH.
+This is a debug message noting that parsing of the body of a received
+message by the resolver failed due to some error (although the parsing of
+the header succeeded). The message parameters give a textual description
+of the problem and the RCODE returned.
</para></listitem>
</varlistentry>
-<varlistentry id="RESOLVER_NORMQUERY">
-<term>RESOLVER_NORMQUERY processing normal query</term>
+<varlistentry id="RESOLVER_NEGATIVE_RETRIES">
+<term>RESOLVER_NEGATIVE_RETRIES negative number of retries (%1) specified in the configuration</term>
<listitem><para>
-The received query has passed all checks and is being processed by the resolver.
+This error is issued when a resolver configuration update has specified
+a negative retry count: only zero or positive values are valid. The
+configuration update was abandoned and the parameters were not changed.
</para></listitem>
</varlistentry>
-<varlistentry id="RESOLVER_NOROOTADDR">
-<term>RESOLVER_NOROOTADDR no root addresses available</term>
+<varlistentry id="RESOLVER_NON_IN_PACKET">
+<term>RESOLVER_NON_IN_PACKET non-IN class request received, returning REFUSED message</term>
<listitem><para>
-A warning message during startup, indicates that no root addresses have been
-set. This may be because the resolver will get them from a priming query.
+This debug message is issued when resolver has received a DNS packet that
+was not IN (Internet) class. The resolver cannot handle such packets,
+so is returning a REFUSED response to the sender.
</para></listitem>
</varlistentry>
-<varlistentry id="RESOLVER_NOTIN">
-<term>RESOLVER_NOTIN non-IN class request received, returning REFUSED message</term>
+<varlistentry id="RESOLVER_NORMAL_QUERY">
+<term>RESOLVER_NORMAL_QUERY processing normal query</term>
<listitem><para>
-A debug message, the resolver has received a DNS packet that was not IN class.
-The resolver cannot handle such packets, so is returning a REFUSED response to
-the sender.
+This is a debug message indicating that the query received by the resolver
+has passed a set of checks (message is well-formed, it is allowed by the
+ACL, it is a supported opcode, etc.) and is being processed by the resolver.
</para></listitem>
</varlistentry>
-<varlistentry id="RESOLVER_NOTONEQUES">
-<term>RESOLVER_NOTONEQUES query contained %1 questions, exactly one question was expected</term>
+<varlistentry id="RESOLVER_NOTIFY_RECEIVED">
+<term>RESOLVER_NOTIFY_RECEIVED NOTIFY arrived but server is not authoritative</term>
<listitem><para>
-A debug message, the resolver received a query that contained the number of
-entires in the question section detailed in the message. This is a malformed
-message, as a DNS query must contain only one question. The resolver will
-return a message to the sender with the RCODE set to FORMERR.
+The resolver has received a NOTIFY message. As the server is not
+authoritative it cannot process it, so it returns an error message to
+the sender with the RCODE set to NOTAUTH.
</para></listitem>
</varlistentry>
-<varlistentry id="RESOLVER_OPCODEUNS">
-<term>RESOLVER_OPCODEUNS opcode %1 not supported by the resolver</term>
+<varlistentry id="RESOLVER_NOT_ONE_QUESTION">
+<term>RESOLVER_NOT_ONE_QUESTION query contained %1 questions, exactly one question was expected</term>
<listitem><para>
-A debug message, the resolver received a message with an unsupported opcode
-(it can only process QUERY opcodes). It will return a message to the sender
-with the RCODE set to NOTIMP.
+This debug message indicates that the resolver received a query that
+contained the number of entries in the question section detailed in
+the message. This is a malformed message, as a DNS query must contain
+only one question. The resolver will return a message to the sender
+with the RCODE set to FORMERR.
</para></listitem>
</varlistentry>
-<varlistentry id="RESOLVER_PARSEERR">
-<term>RESOLVER_PARSEERR error parsing received message: %1 - returning %2</term>
+<varlistentry id="RESOLVER_NO_ROOT_ADDRESS">
+<term>RESOLVER_NO_ROOT_ADDRESS no root addresses available</term>
<listitem><para>
-A debug message noting that the resolver received a message and the parsing
-of the body of the message failed due to some non-protocol related reason
-(although the parsing of the header succeeded). The message parameters give
-a textual description of the problem and the RCODE returned.
+A warning message issued during resolver startup, this indicates that
+no root addresses have been set. This may be because the resolver will
+get them from a priming query.
</para></listitem>
</varlistentry>
-<varlistentry id="RESOLVER_PRINTMSG">
-<term>RESOLVER_PRINTMSG print message command, aeguments are: %1</term>
+<varlistentry id="RESOLVER_PARSE_ERROR">
+<term>RESOLVER_PARSE_ERROR error parsing received message: %1 - returning %2</term>
<listitem><para>
-This message is logged when a "print_message" command is received over the
-command channel.
+This is a debug message noting that the resolver received a message and
+the parsing of the body of the message failed due to some non-protocol
+related reason (although the parsing of the header succeeded).
+The message parameters give a textual description of the problem and
+the RCODE returned.
</para></listitem>
</varlistentry>
-<varlistentry id="RESOLVER_PROTERR">
-<term>RESOLVER_PROTERR protocol error parsing received message: %1 - returning %2</term>
+<varlistentry id="RESOLVER_PRINT_COMMAND">
+<term>RESOLVER_PRINT_COMMAND print message command, arguments are: %1</term>
<listitem><para>
-A debug message noting that the resolver received a message and the parsing
-of the body of the message failed due to some protocol error (although the
-parsing of the header succeeded). The message parameters give a textual
-description of the problem and the RCODE returned.
+This debug message is logged when a "print_message" command is received
+by the resolver over the command channel.
</para></listitem>
</varlistentry>
-<varlistentry id="RESOLVER_QUSETUP">
-<term>RESOLVER_QUSETUP query setup</term>
+<varlistentry id="RESOLVER_PROTOCOL_ERROR">
+<term>RESOLVER_PROTOCOL_ERROR protocol error parsing received message: %1 - returning %2</term>
<listitem><para>
-A debug message noting that the resolver is creating a RecursiveQuery object.
+This is a debug message noting that the resolver received a message and
+the parsing of the body of the message failed due to some protocol error
+(although the parsing of the header succeeded). The message parameters
+give a textual description of the problem and the RCODE returned.
</para></listitem>
</varlistentry>
-<varlistentry id="RESOLVER_QUSHUT">
-<term>RESOLVER_QUSHUT query shutdown</term>
+<varlistentry id="RESOLVER_QUERY_ACCEPTED">
+<term>RESOLVER_QUERY_ACCEPTED query accepted: '%1/%2/%3' from %4</term>
<listitem><para>
-A debug message noting that the resolver is destroying a RecursiveQuery object.
+This debug message is produced by the resolver when an incoming query
+is accepted in terms of the query ACL. The log message shows the query
+in the form of <query name>/<query type>/<query class>, and the client
+that sends the query in the form of <Source IP address>#<source port>.
</para></listitem>
</varlistentry>
-<varlistentry id="RESOLVER_QUTMOSMALL">
-<term>RESOLVER_QUTMOSMALL query timeout of %1 is too small</term>
+<varlistentry id="RESOLVER_QUERY_DROPPED">
+<term>RESOLVER_QUERY_DROPPED query dropped: '%1/%2/%3' from %4</term>
<listitem><para>
-An error indicating that the configuration value specified for the query
-timeout is too small.
+This is an informational message that indicates an incoming query has
+been dropped by the resolver because of the query ACL. Unlike the
+RESOLVER_QUERY_REJECTED case, the server does not return any response.
+The log message shows the query in the form of <query name>/<query
+type>/<query class>, and the client that sends the query in the form of
+<Source IP address>#<source port>.
</para></listitem>
</varlistentry>
-<varlistentry id="RESOLVER_RECURSIVE">
-<term>RESOLVER_RECURSIVE running in recursive mode</term>
+<varlistentry id="RESOLVER_QUERY_REJECTED">
+<term>RESOLVER_QUERY_REJECTED query rejected: '%1/%2/%3' from %4</term>
<listitem><para>
-This is an informational message that appears at startup noting that the
-resolver is running in recursive mode.
+This is an informational message that indicates an incoming query has
+been rejected by the resolver because of the query ACL. This results
+in a response with an RCODE of REFUSED. The log message shows the query
+in the form of <query name>/<query type>/<query class>, and the client
+that sends the query in the form of <Source IP address>#<source port>.
</para></listitem>
</varlistentry>
-<varlistentry id="RESOLVER_RECVMSG">
-<term>RESOLVER_RECVMSG resolver has received a DNS message</term>
+<varlistentry id="RESOLVER_QUERY_SETUP">
+<term>RESOLVER_QUERY_SETUP query setup</term>
<listitem><para>
-A debug message indicating that the resolver has received a message. Depending
-on the debug settings, subsequent log output will indicate the nature of the
-message.
+This is a debug message noting that the resolver is creating a
+RecursiveQuery object.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_QUERY_SHUTDOWN">
+<term>RESOLVER_QUERY_SHUTDOWN query shutdown</term>
+<listitem><para>
+This is a debug message noting that the resolver is destroying a
+RecursiveQuery object.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_QUERY_TIME_SMALL">
+<term>RESOLVER_QUERY_TIME_SMALL query timeout of %1 is too small</term>
+<listitem><para>
+During the update of the resolver's configuration parameters, the value
+of the query timeout was found to be too small. The configuration
+parameters were not changed.
</para></listitem>
</varlistentry>
-<varlistentry id="RESOLVER_RETRYNEG">
-<term>RESOLVER_RETRYNEG negative number of retries (%1) specified in the configuration</term>
+<varlistentry id="RESOLVER_RECEIVED_MESSAGE">
+<term>RESOLVER_RECEIVED_MESSAGE resolver has received a DNS message</term>
<listitem><para>
-An error message indicating that the resolver configuration has specified a
-negative retry count. Only zero or positive values are valid.
+This is a debug message indicating that the resolver has received a
+DNS message. Depending on the debug settings, subsequent log output
+will indicate the nature of the message.
</para></listitem>
</varlistentry>
-<varlistentry id="RESOLVER_ROOTADDR">
-<term>RESOLVER_ROOTADDR setting root address %1(%2)</term>
+<varlistentry id="RESOLVER_RECURSIVE">
+<term>RESOLVER_RECURSIVE running in recursive mode</term>
<listitem><para>
-This message may appear multiple times during startup; it lists the root
-addresses used by the resolver.
+This is an informational message that appears at startup noting that
+the resolver is running in recursive mode.
</para></listitem>
</varlistentry>
-<varlistentry id="RESOLVER_SERVICE">
-<term>RESOLVER_SERVICE service object created</term>
+<varlistentry id="RESOLVER_SERVICE_CREATED">
+<term>RESOLVER_SERVICE_CREATED service object created</term>
<listitem><para>
-A debug message, output when the main service object (which handles the
-received queries) is created.
+This debug message is output when the resolver creates the main service object
+(which handles the received queries).
</para></listitem>
</varlistentry>
-<varlistentry id="RESOLVER_SETPARAM">
-<term>RESOLVER_SETPARAM query timeout: %1, client timeout: %2, lookup timeout: %3, retry count: %4</term>
+<varlistentry id="RESOLVER_SET_PARAMS">
+<term>RESOLVER_SET_PARAMS query timeout: %1, client timeout: %2, lookup timeout: %3, retry count: %4</term>
<listitem><para>
-A debug message, lists the parameters associated with the message. These are:
+This debug message lists the parameters being set for the resolver. These are:
query timeout: the timeout (in ms) used for queries originated by the resolver
-to upstream servers. Client timeout: the interval to resolver a query by
+to upstream servers. Client timeout: the interval to resolve a query by
a client: after this time, the resolver sends back a SERVFAIL to the client
-whilst continuing to resolver the query. Lookup timeout: the time at which the
+whilst continuing to resolve the query. Lookup timeout: the time at which the
resolver gives up trying to resolve a query. Retry count: the number of times
the resolver will retry a query to an upstream server if it gets a timeout.
</para><para>
The client and lookup timeouts require a bit more explanation. The
-resolution of the clent query might require a large number of queries to
+resolution of the client query might require a large number of queries to
upstream nameservers. Even if none of these queries timeout, the total time
taken to perform all the queries may exceed the client timeout. When this
happens, a SERVFAIL is returned to the client, but the resolver continues
-with the resolution process. Data received is added to the cache. However,
-there comes a time - the lookup timeout - when even the resolve gives up.
+with the resolution process; data received is added to the cache. However,
+there comes a time - the lookup timeout - when even the resolver gives up.
At this point it will wait for pending upstream queries to complete or
timeout and drop the query.
</para></listitem>
</varlistentry>
+<varlistentry id="RESOLVER_SET_QUERY_ACL">
+<term>RESOLVER_SET_QUERY_ACL query ACL is configured</term>
+<listitem><para>
+This debug message is generated when a new query ACL is configured for
+the resolver.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_SET_ROOT_ADDRESS">
+<term>RESOLVER_SET_ROOT_ADDRESS setting root address %1(%2)</term>
+<listitem><para>
+This message gives the address of one of the root servers used by the
+resolver. It is output during startup and may appear multiple times,
+once for each root server address.
+</para></listitem>
+</varlistentry>
+
<varlistentry id="RESOLVER_SHUTDOWN">
<term>RESOLVER_SHUTDOWN resolver shutdown complete</term>
<listitem><para>
-This information message is output when the resolver has shut down.
+This informational message is output when the resolver has shut down.
</para></listitem>
</varlistentry>
@@ -2005,11 +3820,982 @@ An informational message, this is output when the resolver starts up.
</para></listitem>
</varlistentry>
-<varlistentry id="RESOLVER_UNEXRESP">
-<term>RESOLVER_UNEXRESP received unexpected response, ignoring</term>
+<varlistentry id="RESOLVER_UNEXPECTED_RESPONSE">
+<term>RESOLVER_UNEXPECTED_RESPONSE received unexpected response, ignoring</term>
+<listitem><para>
+This is a debug message noting that the resolver received a DNS response
+packet on the port on which it is listening for queries. The packet
+has been ignored.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_UNSUPPORTED_OPCODE">
+<term>RESOLVER_UNSUPPORTED_OPCODE opcode %1 not supported by the resolver</term>
+<listitem><para>
+This is a debug message output when the resolver received a message with an
+unsupported opcode (it can only process QUERY opcodes). It will return
+a message to the sender with the RCODE set to NOTIMP.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="SRVCOMM_ADDRESSES_NOT_LIST">
+<term>SRVCOMM_ADDRESSES_NOT_LIST the address and port specification is not a list in %1</term>
+<listitem><para>
+This points to an error in configuration. What was supposed to be a list of
+IP address - port pairs isn't a list at all but something else.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="SRVCOMM_ADDRESS_FAIL">
+<term>SRVCOMM_ADDRESS_FAIL failed to listen on addresses (%1)</term>
+<listitem><para>
+The server failed to bind to one of the address/port pairs it should listen on
+according to the configuration, for the reason listed in the message (usually
+because that pair is already used by another service, or due to missing
+privileges). The server will try
+to recover and bind the address/port pairs it was listening to before (if any).
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="SRVCOMM_ADDRESS_MISSING">
+<term>SRVCOMM_ADDRESS_MISSING address specification is missing "address" or "port" element in %1</term>
+<listitem><para>
+This points to an error in configuration. An address specification in the
+configuration is missing either an address or port and so cannot be used. The
+specification causing the error is given in the message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="SRVCOMM_ADDRESS_TYPE">
+<term>SRVCOMM_ADDRESS_TYPE address specification type is invalid in %1</term>
+<listitem><para>
+This points to an error in configuration. An address specification in the
+configuration is malformed. The specification causing the error is given in the
+message. A valid specification contains an address part (which must be a string
+and must represent a valid IPv4 or IPv6 address) and port (which must be an
+integer in the range valid for TCP/UDP ports on your system).
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="SRVCOMM_ADDRESS_UNRECOVERABLE">
+<term>SRVCOMM_ADDRESS_UNRECOVERABLE failed to recover original addresses also (%2)</term>
+<listitem><para>
+The recovery of old addresses after SRVCOMM_ADDRESS_FAIL also failed for
+the reason listed.
+</para><para>
+The condition indicates problems with the server and/or the system on
+which it is running. The server will continue running to allow
+reconfiguration, but will not be listening on any address or port until
+an administrator reconfigures it.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="SRVCOMM_ADDRESS_VALUE">
+<term>SRVCOMM_ADDRESS_VALUE address to set: %1#%2</term>
+<listitem><para>
+Debug message. This lists one address and port value of the set of
+addresses we are going to listen on (e.g. there will be one log message
+per pair). This appears only after SRVCOMM_SET_LISTEN, but might
+be hidden, as it has a higher debug level.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="SRVCOMM_KEYS_DEINIT">
+<term>SRVCOMM_KEYS_DEINIT deinitializing TSIG keyring</term>
+<listitem><para>
+Debug message indicating that the server is deinitializing the TSIG keyring.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="SRVCOMM_KEYS_INIT">
+<term>SRVCOMM_KEYS_INIT initializing TSIG keyring</term>
+<listitem><para>
+Debug message indicating that the server is initializing the global TSIG
+keyring. This should be seen only at server start.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="SRVCOMM_KEYS_UPDATE">
+<term>SRVCOMM_KEYS_UPDATE updating TSIG keyring</term>
+<listitem><para>
+Debug message indicating new keyring is being loaded from configuration (either
+on startup or as a result of configuration update).
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="SRVCOMM_PORT_RANGE">
+<term>SRVCOMM_PORT_RANGE port out of valid range (%1 in %2)</term>
+<listitem><para>
+This points to an error in configuration. The port in an address
+specification is outside the valid range of 0 to 65535.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="SRVCOMM_SET_LISTEN">
+<term>SRVCOMM_SET_LISTEN setting addresses to listen to</term>
+<listitem><para>
+Debug message, noting that the server is about to start listening on a
+different set of IP addresses and ports than before.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATHTTPD_BAD_OPTION_VALUE">
+<term>STATHTTPD_BAD_OPTION_VALUE bad command line argument: %1</term>
+<listitem><para>
+The stats-httpd module was called with a bad command-line argument
+and will not start.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATHTTPD_CC_SESSION_ERROR">
+<term>STATHTTPD_CC_SESSION_ERROR error connecting to message bus: %1</term>
+<listitem><para>
+The stats-httpd module was unable to connect to the BIND 10 command
+and control bus. A likely problem is that the message bus daemon
+(b10-msgq) is not running. The stats-httpd module will now shut down.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATHTTPD_CLOSING">
+<term>STATHTTPD_CLOSING closing %1#%2</term>
+<listitem><para>
+The stats-httpd daemon will stop listening for requests on the given
+address and port number.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATHTTPD_CLOSING_CC_SESSION">
+<term>STATHTTPD_CLOSING_CC_SESSION stopping cc session</term>
+<listitem><para>
+Debug message indicating that the stats-httpd module is disconnecting
+from the command and control bus.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATHTTPD_HANDLE_CONFIG">
+<term>STATHTTPD_HANDLE_CONFIG reading configuration: %1</term>
+<listitem><para>
+The stats-httpd daemon has received new configuration data and will now
+process it. The (changed) data is printed.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATHTTPD_RECEIVED_SHUTDOWN_COMMAND">
+<term>STATHTTPD_RECEIVED_SHUTDOWN_COMMAND shutdown command received</term>
+<listitem><para>
+A shutdown command was sent to the stats-httpd module, and it will
+now shut down.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATHTTPD_RECEIVED_STATUS_COMMAND">
+<term>STATHTTPD_RECEIVED_STATUS_COMMAND received command to return status</term>
+<listitem><para>
+A status command was sent to the stats-httpd module, and it will
+respond with 'Stats Httpd is up.' and its PID.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATHTTPD_RECEIVED_UNKNOWN_COMMAND">
+<term>STATHTTPD_RECEIVED_UNKNOWN_COMMAND received unknown command: %1</term>
+<listitem><para>
+An unknown command has been sent to the stats-httpd module. The
+stats-httpd module will respond with an error, and the command will
+be ignored.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATHTTPD_SERVER_ERROR">
+<term>STATHTTPD_SERVER_ERROR HTTP server error: %1</term>
+<listitem><para>
+An internal error occurred while handling an HTTP request. An HTTP 500
+response will be sent back, and the specific error is printed. This
+is an error condition that likely points to a module that is not
+responding correctly to statistic requests.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATHTTPD_SERVER_INIT_ERROR">
+<term>STATHTTPD_SERVER_INIT_ERROR HTTP server initialization error: %1</term>
+<listitem><para>
+There was a problem initializing the HTTP server in the stats-httpd
+module upon receiving its configuration data. The most likely cause
+is a port binding problem or a bad configuration value. The specific
+error is printed in the message. The new configuration is ignored,
+and an error is sent back.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATHTTPD_SHUTDOWN">
+<term>STATHTTPD_SHUTDOWN shutting down</term>
+<listitem><para>
+The stats-httpd daemon is shutting down.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATHTTPD_STARTED">
+<term>STATHTTPD_STARTED listening on %1#%2</term>
+<listitem><para>
+The stats-httpd daemon will now start listening for requests on the
+given address and port number.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATHTTPD_STARTING_CC_SESSION">
+<term>STATHTTPD_STARTING_CC_SESSION starting cc session</term>
+<listitem><para>
+Debug message indicating that the stats-httpd module is connecting to
+the command and control bus.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATHTTPD_START_SERVER_INIT_ERROR">
+<term>STATHTTPD_START_SERVER_INIT_ERROR HTTP server initialization error: %1</term>
+<listitem><para>
+There was a problem initializing the HTTP server in the stats-httpd
+module upon startup. The most likely cause is that it was not able
+to bind to the listening port. The specific error is printed, and the
+module will shut down.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATHTTPD_STOPPED_BY_KEYBOARD">
+<term>STATHTTPD_STOPPED_BY_KEYBOARD keyboard interrupt, shutting down</term>
+<listitem><para>
+There was a keyboard interrupt signal to stop the stats-httpd
+daemon. The daemon will now shut down.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATHTTPD_UNKNOWN_CONFIG_ITEM">
+<term>STATHTTPD_UNKNOWN_CONFIG_ITEM unknown configuration item: %1</term>
+<listitem><para>
+The stats-httpd daemon received a configuration update from the
+configuration manager. However, one of the items in the
+configuration is unknown. The new configuration is ignored, and an
+error is sent back. A possible cause is that there was an upgrade
+problem, and the stats-httpd version is out of sync with the rest of
+the system.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATS_BAD_OPTION_VALUE">
+<term>STATS_BAD_OPTION_VALUE bad command line argument: %1</term>
+<listitem><para>
+The stats module was called with a bad command-line argument and will
+not start.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATS_CC_SESSION_ERROR">
+<term>STATS_CC_SESSION_ERROR error connecting to message bus: %1</term>
+<listitem><para>
+The stats module was unable to connect to the BIND 10 command and
+control bus. A likely problem is that the message bus daemon
+(b10-msgq) is not running. The stats module will now shut down.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATS_RECEIVED_NEW_CONFIG">
+<term>STATS_RECEIVED_NEW_CONFIG received new configuration: %1</term>
+<listitem><para>
+This debug message is printed when the stats module has received a
+configuration update from the configuration manager.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATS_RECEIVED_REMOVE_COMMAND">
+<term>STATS_RECEIVED_REMOVE_COMMAND received command to remove %1</term>
+<listitem><para>
+A remove command for the given name was sent to the stats module, and
+the given statistics value will now be removed. It will not appear in
+statistics reports until it appears in a statistics update from a
+module again.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATS_RECEIVED_RESET_COMMAND">
+<term>STATS_RECEIVED_RESET_COMMAND received command to reset all statistics</term>
+<listitem><para>
+The stats module received a command to clear all collected statistics.
+The data is cleared until it receives an update from the modules again.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATS_RECEIVED_SHOW_ALL_COMMAND">
+<term>STATS_RECEIVED_SHOW_ALL_COMMAND received command to show all statistics</term>
+<listitem><para>
+The stats module received a command to show all statistics that it has
+collected.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATS_RECEIVED_SHOW_NAME_COMMAND">
+<term>STATS_RECEIVED_SHOW_NAME_COMMAND received command to show statistics for %1</term>
+<listitem><para>
+The stats module received a command to show the statistics that it has
+collected for the given item.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATS_RECEIVED_SHUTDOWN_COMMAND">
+<term>STATS_RECEIVED_SHUTDOWN_COMMAND shutdown command received</term>
+<listitem><para>
+A shutdown command was sent to the stats module and it will now shut down.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATS_RECEIVED_STATUS_COMMAND">
+<term>STATS_RECEIVED_STATUS_COMMAND received command to return status</term>
+<listitem><para>
+A status command was sent to the stats module. It will return a
+response indicating that it is running normally.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATS_RECEIVED_UNKNOWN_COMMAND">
+<term>STATS_RECEIVED_UNKNOWN_COMMAND received unknown command: %1</term>
+<listitem><para>
+An unknown command has been sent to the stats module. The stats module
+will respond with an error and the command will be ignored.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATS_SEND_REQUEST_BOSS">
+<term>STATS_SEND_REQUEST_BOSS requesting boss to send statistics</term>
+<listitem><para>
+This debug message is printed when a request is sent to the boss module
+to send its data to the stats module.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATS_STOPPED_BY_KEYBOARD">
+<term>STATS_STOPPED_BY_KEYBOARD keyboard interrupt, shutting down</term>
+<listitem><para>
+There was a keyboard interrupt signal to stop the stats module. The
+daemon will now shut down.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATS_UNKNOWN_COMMAND_IN_SPEC">
+<term>STATS_UNKNOWN_COMMAND_IN_SPEC unknown command in specification file: %1</term>
+<listitem><para>
+The specification file for the stats module contains a command that
+is unknown in the implementation. The most likely cause is an
+installation problem, where the specification file stats.spec is
+from a different version of BIND 10 than the stats module itself.
+Please check your installation.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFRIN_AXFR_DATABASE_FAILURE">
+<term>XFRIN_AXFR_DATABASE_FAILURE AXFR transfer of zone %1 failed: %2</term>
+<listitem><para>
+The AXFR transfer for the given zone has failed due to a database problem.
+The error is shown in the log message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFRIN_AXFR_INTERNAL_FAILURE">
+<term>XFRIN_AXFR_INTERNAL_FAILURE AXFR transfer of zone %1 failed: %2</term>
+<listitem><para>
+The AXFR transfer for the given zone has failed due to an internal
+problem in the bind10 python wrapper library.
+The error is shown in the log message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFRIN_AXFR_TRANSFER_FAILURE">
+<term>XFRIN_AXFR_TRANSFER_FAILURE AXFR transfer of zone %1 failed: %2</term>
+<listitem><para>
+The AXFR transfer for the given zone has failed due to a protocol error.
+The error is shown in the log message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFRIN_AXFR_TRANSFER_STARTED">
+<term>XFRIN_AXFR_TRANSFER_STARTED AXFR transfer of zone %1 started</term>
+<listitem><para>
+A connection to the master server has been made, the serial value in
+the SOA record has been checked, and a zone transfer has been started.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFRIN_AXFR_TRANSFER_SUCCESS">
+<term>XFRIN_AXFR_TRANSFER_SUCCESS AXFR transfer of zone %1 succeeded</term>
+<listitem><para>
+The AXFR transfer of the given zone was successfully completed.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFRIN_BAD_MASTER_ADDR_FORMAT">
+<term>XFRIN_BAD_MASTER_ADDR_FORMAT bad format for master address: %1</term>
+<listitem><para>
+The given master address is not a valid IP address.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFRIN_BAD_MASTER_PORT_FORMAT">
+<term>XFRIN_BAD_MASTER_PORT_FORMAT bad format for master port: %1</term>
+<listitem><para>
+The master port as read from the configuration is not a valid port number.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFRIN_BAD_TSIG_KEY_STRING">
+<term>XFRIN_BAD_TSIG_KEY_STRING bad TSIG key string: %1</term>
+<listitem><para>
+The TSIG key string as read from the configuration does not represent
+a valid TSIG key.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFRIN_BAD_ZONE_CLASS">
+<term>XFRIN_BAD_ZONE_CLASS Invalid zone class: %1</term>
+<listitem><para>
+The zone class as read from the configuration is not a valid DNS class.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFRIN_CC_SESSION_ERROR">
+<term>XFRIN_CC_SESSION_ERROR error reading from cc channel: %1</term>
+<listitem><para>
+There was a problem reading from the command and control channel. The
+most likely cause is that the msgq daemon is not running.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFRIN_COMMAND_ERROR">
+<term>XFRIN_COMMAND_ERROR error while executing command '%1': %2</term>
+<listitem><para>
+There was an error while the given command was being processed. The
+error is given in the log message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFRIN_CONNECT_MASTER">
+<term>XFRIN_CONNECT_MASTER error connecting to master at %1: %2</term>
+<listitem><para>
+There was an error opening a connection to the master. The error is
+shown in the log message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFRIN_IMPORT_DNS">
+<term>XFRIN_IMPORT_DNS error importing python DNS module: %1</term>
+<listitem><para>
+There was an error importing the python DNS module pydnspp. The most
+likely cause is a PYTHONPATH problem.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFRIN_MSGQ_SEND_ERROR">
+<term>XFRIN_MSGQ_SEND_ERROR error while contacting %1 and %2</term>
+<listitem><para>
+There was a problem sending a message to the xfrout module or the
+zone manager. This most likely means that the msgq daemon has quit or
+was killed.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFRIN_MSGQ_SEND_ERROR_ZONE_MANAGER">
+<term>XFRIN_MSGQ_SEND_ERROR_ZONE_MANAGER error while contacting %1</term>
+<listitem><para>
+There was a problem sending a message to the zone manager. This most
+likely means that the msgq daemon has quit or was killed.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFRIN_RETRANSFER_UNKNOWN_ZONE">
+<term>XFRIN_RETRANSFER_UNKNOWN_ZONE got notification to retransfer unknown zone %1</term>
+<listitem><para>
+There was an internal command to retransfer the given zone, but the
+zone is not known to the system. This may indicate that the configuration
+for xfrin is incomplete, or there was a typographical error in the
+zone name in the configuration.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFRIN_STARTING">
+<term>XFRIN_STARTING starting resolver with command line '%1'</term>
+<listitem><para>
+An informational message, this is output when the xfrin daemon starts up.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFRIN_STOPPED_BY_KEYBOARD">
+<term>XFRIN_STOPPED_BY_KEYBOARD keyboard interrupt, shutting down</term>
+<listitem><para>
+There was a keyboard interrupt signal to stop the xfrin daemon. The
+daemon will now shut down.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFRIN_UNKNOWN_ERROR">
+<term>XFRIN_UNKNOWN_ERROR unknown error: %1</term>
+<listitem><para>
+An uncaught exception was raised while running the xfrin daemon. The
+exception message is printed in the log message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_AXFR_TRANSFER_DONE">
+<term>XFROUT_AXFR_TRANSFER_DONE transfer of %1/%2 complete</term>
+<listitem><para>
+The transfer of the given zone has been completed successfully, or was
+aborted due to a shutdown event.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_AXFR_TRANSFER_ERROR">
+<term>XFROUT_AXFR_TRANSFER_ERROR error transferring zone %1/%2: %3</term>
+<listitem><para>
+An uncaught exception was encountered while sending the response to
+an AXFR query. The error message of the exception is included in the
+log message, but this error most likely points to incomplete exception
+handling in the code.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_AXFR_TRANSFER_FAILED">
+<term>XFROUT_AXFR_TRANSFER_FAILED transfer of %1/%2 failed, rcode: %3</term>
+<listitem><para>
+A transfer out for the given zone failed. An error response is sent
+to the client. The given rcode is the rcode that is set in the error
+response. This is either NOTAUTH (we are not authoritative for the
+zone), SERVFAIL (our internal database is missing the SOA record for
+the zone), or REFUSED (the limit of simultaneous outgoing AXFR
+transfers, as specified by the configuration value
+Xfrout/max_transfers_out, has been reached).
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_AXFR_TRANSFER_STARTED">
+<term>XFROUT_AXFR_TRANSFER_STARTED transfer of zone %1/%2 has started</term>
+<listitem><para>
+A transfer out of the given zone has started.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_BAD_TSIG_KEY_STRING">
+<term>XFROUT_BAD_TSIG_KEY_STRING bad TSIG key string: %1</term>
+<listitem><para>
+The TSIG key string as read from the configuration does not represent
+a valid TSIG key.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_CC_SESSION_ERROR">
+<term>XFROUT_CC_SESSION_ERROR error reading from cc channel: %1</term>
+<listitem><para>
+There was a problem reading from the command and control channel. The
+most likely cause is that the msgq daemon is not running.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_CC_SESSION_TIMEOUT_ERROR">
+<term>XFROUT_CC_SESSION_TIMEOUT_ERROR timeout waiting for cc response</term>
+<listitem><para>
+There was a problem reading a response from another module over the
+command and control channel. The most likely cause is that the
+configuration manager b10-cfgmgr is not running.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_FETCH_REQUEST_ERROR">
+<term>XFROUT_FETCH_REQUEST_ERROR socket error while fetching a request from the auth daemon</term>
+<listitem><para>
+There was a socket error while contacting the b10-auth daemon to
+fetch a transfer request. The auth daemon may have shut down.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_HANDLE_QUERY_ERROR">
+<term>XFROUT_HANDLE_QUERY_ERROR error while handling query: %1</term>
+<listitem><para>
+There was a general error handling an xfrout query. The error is shown
+in the message. In principle this error should not appear, and points
+to an oversight catching exceptions in the right place. However, to
+ensure the daemon keeps running, this error is caught and reported.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_IMPORT">
+<term>XFROUT_IMPORT error importing python module: %1</term>
+<listitem><para>
+There was an error importing a python module. One of the modules needed
+by xfrout could not be found. This suggests that either some libraries
+are missing on the system, or the PYTHONPATH variable is not correct.
+The specific place where this library needs to be depends on your
+system and your specific installation.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_NEW_CONFIG">
+<term>XFROUT_NEW_CONFIG Update xfrout configuration</term>
+<listitem><para>
+New configuration settings have been sent from the configuration
+manager. The xfrout daemon will now apply them.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_NEW_CONFIG_DONE">
+<term>XFROUT_NEW_CONFIG_DONE Update xfrout configuration done</term>
+<listitem><para>
+The xfrout daemon is now done reading the new configuration settings
+received from the configuration manager.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_NOTIFY_COMMAND">
+<term>XFROUT_NOTIFY_COMMAND received command to send notifies for %1/%2</term>
+<listitem><para>
+The xfrout daemon received a command on the command channel that
+NOTIFY packets should be sent for the given zone.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_PARSE_QUERY_ERROR">
+<term>XFROUT_PARSE_QUERY_ERROR error parsing query: %1</term>
+<listitem><para>
+There was a parse error while reading an incoming query. The parse
+error is shown in the log message. A remote client sent a packet we
+do not understand or support. The xfrout request will be ignored.
+In general, this should only occur for unexpected problems like
+memory allocation failures, as the query should already have been
+parsed by the b10-auth daemon, before it was passed here.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_PROCESS_REQUEST_ERROR">
+<term>XFROUT_PROCESS_REQUEST_ERROR error processing transfer request: %2</term>
+<listitem><para>
+There was an error processing a transfer request. The error is included
+in the log message, but at this point no specific information other
+than that could be given. This points to incomplete exception handling
+in the code.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_QUERY_DROPPED">
+<term>XFROUT_QUERY_DROPPED request to transfer %1/%2 to [%3]:%4 dropped</term>
+<listitem><para>
+The xfrout process silently dropped a request to transfer the zone to the
+given host, as required by the ACLs. The %1 and %2 represent the zone name
+and class, the %3 and %4 the IP address and port of the peer requesting the
+transfer.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_QUERY_REJECTED">
+<term>XFROUT_QUERY_REJECTED request to transfer %1/%2 to [%3]:%4 rejected</term>
+<listitem><para>
+The xfrout process rejected (with a REFUSED rcode) a request to transfer the
+zone to the given host because of the ACLs. The %1 and %2 represent the zone
+name and class, the %3 and %4 the IP address and port of the peer requesting
+the transfer.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_RECEIVED_SHUTDOWN_COMMAND">
+<term>XFROUT_RECEIVED_SHUTDOWN_COMMAND shutdown command received</term>
+<listitem><para>
+The xfrout daemon received a shutdown command from the command channel
+and will now shut down.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_RECEIVE_FILE_DESCRIPTOR_ERROR">
+<term>XFROUT_RECEIVE_FILE_DESCRIPTOR_ERROR error receiving the file descriptor for an XFR connection</term>
+<listitem><para>
+There was an error receiving the file descriptor for the transfer
+request. Normally, the request is received by b10-auth, and passed on
+to the xfrout daemon, so it can answer directly. However, there was a
+problem receiving this file descriptor. The request will be ignored.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_REMOVE_OLD_UNIX_SOCKET_FILE_ERROR">
+<term>XFROUT_REMOVE_OLD_UNIX_SOCKET_FILE_ERROR error removing unix socket file %1: %2</term>
+<listitem><para>
+The unix socket file xfrout needs for contact with the auth daemon
+already exists, and needs to be removed first, but there is a problem
+removing it. It is likely that we do not have permission to remove
+this file. The specific error is shown in the log message. The xfrout
+daemon will shut down.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_REMOVE_UNIX_SOCKET_FILE_ERROR">
+<term>XFROUT_REMOVE_UNIX_SOCKET_FILE_ERROR error clearing unix socket file %1: %2</term>
+<listitem><para>
+When shutting down, the xfrout daemon tried to clear the unix socket
+file used for communication with the auth daemon. It failed to remove
+the file. The reason for the failure is given in the error message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_SOCKET_SELECT_ERROR">
+<term>XFROUT_SOCKET_SELECT_ERROR error while calling select() on request socket: %1</term>
+<listitem><para>
+There was an error while calling select() on the socket that informs
+the xfrout daemon that a new xfrout request has arrived. This should
+be a result of rare local error such as memory allocation failure and
+shouldn't happen under normal conditions. The error is included in the
+log message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_STOPPED_BY_KEYBOARD">
+<term>XFROUT_STOPPED_BY_KEYBOARD keyboard interrupt, shutting down</term>
+<listitem><para>
+There was a keyboard interrupt signal to stop the xfrout daemon. The
+daemon will now shut down.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_STOPPING">
+<term>XFROUT_STOPPING the xfrout daemon is shutting down</term>
+<listitem><para>
+The current transfer is aborted, as the xfrout daemon is shutting down.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_UNIX_SOCKET_FILE_IN_USE">
+<term>XFROUT_UNIX_SOCKET_FILE_IN_USE another xfrout process seems to be using the unix socket file %1</term>
+<listitem><para>
+While starting up, the xfrout daemon tried to clear the unix domain
+socket needed for contacting the b10-auth daemon to pass requests
+on, but the file is in use. The most likely cause is that another
+xfrout daemon process is still running. This xfrout daemon (the one
+printing this message) will not start.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ZONEMGR_CCSESSION_ERROR">
+<term>ZONEMGR_CCSESSION_ERROR command channel session error: %1</term>
+<listitem><para>
+An error was encountered on the command channel. The message indicates
+the nature of the error.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ZONEMGR_JITTER_TOO_BIG">
+<term>ZONEMGR_JITTER_TOO_BIG refresh_jitter is too big, setting to 0.5</term>
+<listitem><para>
+The value specified in the configuration for the refresh jitter is too large
+so its value has been set to the maximum of 0.5.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ZONEMGR_KEYBOARD_INTERRUPT">
+<term>ZONEMGR_KEYBOARD_INTERRUPT exiting zonemgr process as result of keyboard interrupt</term>
+<listitem><para>
+An informational message output when the zone manager was being run at a
+terminal and it was terminated via a keyboard interrupt signal.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ZONEMGR_LOAD_ZONE">
+<term>ZONEMGR_LOAD_ZONE loading zone %1 (class %2)</term>
+<listitem><para>
+This is a debug message indicating that the zone of the specified class
+is being loaded.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ZONEMGR_NO_MASTER_ADDRESS">
+<term>ZONEMGR_NO_MASTER_ADDRESS internal BIND 10 command did not contain address of master</term>
+<listitem><para>
+A command received by the zone manager from the Auth module did not
+contain the address of the master server from which a NOTIFY message
+was received. This may be due to an internal programming error; please
+submit a bug report.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ZONEMGR_NO_SOA">
+<term>ZONEMGR_NO_SOA zone %1 (class %2) does not have an SOA record</term>
+<listitem><para>
+When loading the named zone of the specified class the zone manager
+discovered that the data did not contain an SOA record. The load has
+been abandoned.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ZONEMGR_NO_TIMER_THREAD">
+<term>ZONEMGR_NO_TIMER_THREAD trying to stop zone timer thread but it is not running</term>
+<listitem><para>
+An attempt was made to stop the timer thread (used to track when zones
+should be refreshed) but it was not running. This may indicate an
+internal program error. Please submit a bug report.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ZONEMGR_NO_ZONE_CLASS">
+<term>ZONEMGR_NO_ZONE_CLASS internal BIND 10 command did not contain class of zone</term>
+<listitem><para>
+A command received by the zone manager from another BIND 10 module did
+not contain the class of the zone on which the zone manager should act.
+This may be due to an internal programming error; please submit a
+bug report.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ZONEMGR_NO_ZONE_NAME">
+<term>ZONEMGR_NO_ZONE_NAME internal BIND 10 command did not contain name of zone</term>
+<listitem><para>
+A command received by the zone manager from another BIND 10 module did
+not contain the name of the zone on which the zone manager should act.
+This may be due to an internal programming error; please submit a
+bug report.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ZONEMGR_RECEIVE_NOTIFY">
+<term>ZONEMGR_RECEIVE_NOTIFY received NOTIFY command for zone %1 (class %2)</term>
+<listitem><para>
+This is a debug message indicating that the zone manager has received a
+NOTIFY command over the command channel. The command is sent by the Auth
+process when it is acting as a slave server for the zone and causes the
+zone manager to record the master server for the zone and start a timer;
+when the timer expires, the master will be polled to see if it contains
+new data.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ZONEMGR_RECEIVE_SHUTDOWN">
+<term>ZONEMGR_RECEIVE_SHUTDOWN received SHUTDOWN command</term>
+<listitem><para>
+This is a debug message indicating that the zone manager has received
+a SHUTDOWN command over the command channel from the Boss process.
+It will act on this command and shut down.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ZONEMGR_RECEIVE_UNKNOWN">
+<term>ZONEMGR_RECEIVE_UNKNOWN received unknown command '%1'</term>
+<listitem><para>
+This is a warning message indicating that the zone manager has received
+the stated command over the command channel. The command is not known
+to the zone manager and although the command is ignored, its receipt
+may indicate an internal error. Please submit a bug report.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ZONEMGR_RECEIVE_XFRIN_FAILED">
+<term>ZONEMGR_RECEIVE_XFRIN_FAILED received XFRIN FAILED command for zone %1 (class %2)</term>
+<listitem><para>
+This is a debug message indicating that the zone manager has received
+an XFRIN FAILED command over the command channel. The command is sent
+by the Xfrin process when a transfer of zone data into the system has
+failed, and causes the zone manager to schedule another transfer attempt.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ZONEMGR_RECEIVE_XFRIN_SUCCESS">
+<term>ZONEMGR_RECEIVE_XFRIN_SUCCESS received XFRIN SUCCESS command for zone %1 (class %2)</term>
+<listitem><para>
+This is a debug message indicating that the zone manager has received
+an XFRIN SUCCESS command over the command channel. The command is sent
+by the Xfrin process when the transfer of zone data into the system has
+succeeded, and causes the data to be loaded and served by BIND 10.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ZONEMGR_REFRESH_ZONE">
+<term>ZONEMGR_REFRESH_ZONE refreshing zone %1 (class %2)</term>
+<listitem><para>
+The zone manager is refreshing the named zone of the specified class
+with updated information.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ZONEMGR_SELECT_ERROR">
+<term>ZONEMGR_SELECT_ERROR error with select(): %1</term>
+<listitem><para>
+An attempt to wait for input from a socket failed. The failing operation
+is a call to the operating system's select() function, which failed for
+the given reason.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ZONEMGR_SEND_FAIL">
+<term>ZONEMGR_SEND_FAIL failed to send command to %1, session has been closed</term>
+<listitem><para>
+The zone manager attempted to send a command to the named BIND 10 module,
+but the send failed. The session between the modules has been closed.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ZONEMGR_SESSION_ERROR">
+<term>ZONEMGR_SESSION_ERROR unable to establish session to command channel daemon</term>
+<listitem><para>
+The zonemgr process was not able to be started because it could not
+connect to the command channel daemon. The most usual cause of this
+problem is that the daemon is not running.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ZONEMGR_SESSION_TIMEOUT">
+<term>ZONEMGR_SESSION_TIMEOUT timeout on session to command channel daemon</term>
+<listitem><para>
+The zonemgr process was not able to be started because it timed out when
+connecting to the command channel daemon. The most usual cause of this
+problem is that the daemon is not running.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ZONEMGR_SHUTDOWN">
+<term>ZONEMGR_SHUTDOWN zone manager has shut down</term>
+<listitem><para>
+A debug message, output when the zone manager has shut down completely.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ZONEMGR_STARTING">
+<term>ZONEMGR_STARTING zone manager starting</term>
+<listitem><para>
+A debug message output when the zone manager starts up.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ZONEMGR_TIMER_THREAD_RUNNING">
+<term>ZONEMGR_TIMER_THREAD_RUNNING trying to start timer thread but one is already running</term>
+<listitem><para>
+This message is issued when an attempt is made to start the timer
+thread (which keeps track of when zones need a refresh) but one is
+already running. It indicates either an error in the program logic or
+a problem with stopping a previous instance of the timer. Please submit
+a bug report.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ZONEMGR_UNKNOWN_ZONE_FAIL">
+<term>ZONEMGR_UNKNOWN_ZONE_FAIL zone %1 (class %2) is not known to the zone manager</term>
+<listitem><para>
+An XFRIN operation has failed but the zone that was the subject of the
+operation is not being managed by the zone manager. This may indicate
+an error in the program (as the operation should not have been initiated
+if this were the case). Please submit a bug report.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ZONEMGR_UNKNOWN_ZONE_NOTIFIED">
+<term>ZONEMGR_UNKNOWN_ZONE_NOTIFIED notified zone %1 (class %2) is not known to the zone manager</term>
+<listitem><para>
+A NOTIFY was received but the zone that was the subject of the operation
+is not being managed by the zone manager. This may indicate an error
+in the program (as the operation should not have been initiated if this
+were the case). Please submit a bug report.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ZONEMGR_UNKNOWN_ZONE_SUCCESS">
+<term>ZONEMGR_UNKNOWN_ZONE_SUCCESS zone %1 (class %2) is not known to the zone manager</term>
<listitem><para>
-A debug message noting that the server has received a response instead of a
-query and is ignoring it.
+An XFRIN operation has succeeded but the zone received is not being
+managed by the zone manager. This may indicate an error in the program
+(as the operation should not have been initiated if this were the case).
+Please submit a bug report.
</para></listitem>
</varlistentry>
</variablelist>
diff --git a/src/bin/auth/Makefile.am b/src/bin/auth/Makefile.am
index e3128b5..4d8ec83 100644
--- a/src/bin/auth/Makefile.am
+++ b/src/bin/auth/Makefile.am
@@ -50,6 +50,12 @@ b10_auth_SOURCES += command.cc command.h
b10_auth_SOURCES += common.h common.cc
b10_auth_SOURCES += statistics.cc statistics.h
b10_auth_SOURCES += main.cc
+# This is a temporary workaround for #1206, where the InMemoryClient has been
+# moved to an ldopened library. We could add that library to LDADD, but that
+# is nonportable. When #1207 is done this becomes moot anyway, and the
+# specific workaround is not needed anymore, so we can then remove this
+# line again.
+b10_auth_SOURCES += ${top_srcdir}/src/lib/datasrc/memory_datasrc.cc
nodist_b10_auth_SOURCES = auth_messages.h auth_messages.cc
EXTRA_DIST += auth_messages.mes
diff --git a/src/bin/auth/auth_messages.mes b/src/bin/auth/auth_messages.mes
index 9f04b76..1ffa687 100644
--- a/src/bin/auth/auth_messages.mes
+++ b/src/bin/auth/auth_messages.mes
@@ -257,4 +257,7 @@ request. The zone manager component has been informed of the request,
but has returned an error response (which is included in the message). The
NOTIFY request will not be honored.
+% AUTH_INVALID_STATISTICS_DATA invalid specification of statistics data specified
+An error was encountered when the authoritative server specified
+statistics data which is invalid for the auth specification file.
diff --git a/src/bin/auth/auth_srv.cc b/src/bin/auth/auth_srv.cc
index 5a31442..c9dac88 100644
--- a/src/bin/auth/auth_srv.cc
+++ b/src/bin/auth/auth_srv.cc
@@ -125,6 +125,10 @@ public:
/// The TSIG keyring
const shared_ptr<TSIGKeyRing>* keyring_;
+
+ /// Bind the ModuleSpec object in config_session_ with
+ /// isc:config::ModuleSpec::validateStatistics.
+ void registerStatisticsValidator();
private:
std::string db_file_;
@@ -139,6 +143,9 @@ private:
/// Increment query counter
void incCounter(const int protocol);
+
+ // validateStatistics
+ bool validateStatistics(isc::data::ConstElementPtr data) const;
};
AuthSrvImpl::AuthSrvImpl(const bool use_cache,
@@ -317,6 +324,7 @@ AuthSrv::setXfrinSession(AbstractSession* xfrin_session) {
void
AuthSrv::setConfigSession(ModuleCCSession* config_session) {
impl_->config_session_ = config_session;
+ impl_->registerStatisticsValidator();
}
void
@@ -670,6 +678,22 @@ AuthSrvImpl::incCounter(const int protocol) {
}
}
+void
+AuthSrvImpl::registerStatisticsValidator() {
+ counters_.registerStatisticsValidator(
+ boost::bind(&AuthSrvImpl::validateStatistics, this, _1));
+}
+
+bool
+AuthSrvImpl::validateStatistics(isc::data::ConstElementPtr data) const {
+ if (config_session_ == NULL) {
+ return (false);
+ }
+ return (
+ config_session_->getModuleSpec().validateStatistics(
+ data, true));
+}
+
ConstElementPtr
AuthSrvImpl::setDbFile(ConstElementPtr config) {
ConstElementPtr answer = isc::config::createAnswer();
diff --git a/src/bin/auth/b10-auth.8 b/src/bin/auth/b10-auth.8
index 0356683..aedadee 100644
--- a/src/bin/auth/b10-auth.8
+++ b/src/bin/auth/b10-auth.8
@@ -2,12 +2,12 @@
.\" Title: b10-auth
.\" Author: [FIXME: author] [see http://docbook.sf.net/el/author]
.\" Generator: DocBook XSL Stylesheets v1.75.2 <http://docbook.sf.net/>
-.\" Date: March 8, 2011
+.\" Date: August 11, 2011
.\" Manual: BIND10
.\" Source: BIND10
.\" Language: English
.\"
-.TH "B10\-AUTH" "8" "March 8, 2011" "BIND10" "BIND10"
+.TH "B10\-AUTH" "8" "August 11, 2011" "BIND10" "BIND10"
.\" -----------------------------------------------------------------
.\" * set default formatting
.\" -----------------------------------------------------------------
@@ -70,18 +70,6 @@ defines the path to the SQLite3 zone file when using the sqlite datasource\&. Th
/usr/local/var/bind10\-devel/zone\&.sqlite3\&.
.PP
-\fIlisten_on\fR
-is a list of addresses and ports for
-\fBb10\-auth\fR
-to listen on\&. The list items are the
-\fIaddress\fR
-string and
-\fIport\fR
-number\&. By default,
-\fBb10\-auth\fR
-listens on port 53 on the IPv6 (::) and IPv4 (0\&.0\&.0\&.0) wildcard addresses\&.
-.PP
-
\fIdatasources\fR
configures data sources\&. The list items include:
\fItype\fR
@@ -114,6 +102,18 @@ In this development version, currently this is only used for the memory data sou
.RE
.PP
+\fIlisten_on\fR
+is a list of addresses and ports for
+\fBb10\-auth\fR
+to listen on\&. The list items are the
+\fIaddress\fR
+string and
+\fIport\fR
+number\&. By default,
+\fBb10\-auth\fR
+listens on port 53 on the IPv6 (::) and IPv4 (0\&.0\&.0\&.0) wildcard addresses\&.
+.PP
+
\fIstatistics\-interval\fR
is the timer interval in seconds for
\fBb10\-auth\fR
@@ -164,6 +164,25 @@ immediately\&.
\fBshutdown\fR
exits
\fBb10\-auth\fR\&. (Note that the BIND 10 boss process will restart this service\&.)
+.SH "STATISTICS DATA"
+.PP
+The statistics data collected by the
+\fBb10\-stats\fR
+daemon include:
+.PP
+auth\&.queries\&.tcp
+.RS 4
+Total count of queries received by the
+\fBb10\-auth\fR
+server over TCP since startup\&.
+.RE
+.PP
+auth\&.queries\&.udp
+.RS 4
+Total count of queries received by the
+\fBb10\-auth\fR
+server over UDP since startup\&.
+.RE
.SH "FILES"
.PP
diff --git a/src/bin/auth/b10-auth.xml b/src/bin/auth/b10-auth.xml
index 2b53394..636f437 100644
--- a/src/bin/auth/b10-auth.xml
+++ b/src/bin/auth/b10-auth.xml
@@ -20,7 +20,7 @@
<refentry>
<refentryinfo>
- <date>March 8, 2011</date>
+ <date>August 11, 2011</date>
</refentryinfo>
<refmeta>
@@ -132,15 +132,6 @@
</para>
<para>
- <varname>listen_on</varname> is a list of addresses and ports for
- <command>b10-auth</command> to listen on.
- The list items are the <varname>address</varname> string
- and <varname>port</varname> number.
- By default, <command>b10-auth</command> listens on port 53
- on the IPv6 (::) and IPv4 (0.0.0.0) wildcard addresses.
- </para>
-
- <para>
<varname>datasources</varname> configures data sources.
The list items include:
<varname>type</varname> to optionally choose the data source type
@@ -165,6 +156,15 @@
</para>
<para>
+ <varname>listen_on</varname> is a list of addresses and ports for
+ <command>b10-auth</command> to listen on.
+ The list items are the <varname>address</varname> string
+ and <varname>port</varname> number.
+ By default, <command>b10-auth</command> listens on port 53
+ on the IPv6 (::) and IPv4 (0.0.0.0) wildcard addresses.
+ </para>
+
+ <para>
<varname>statistics-interval</varname> is the timer interval
in seconds for <command>b10-auth</command> to share its
statistics information to
@@ -209,6 +209,34 @@
</refsect1>
<refsect1>
+ <title>STATISTICS DATA</title>
+
+ <para>
+ The statistics data collected by the <command>b10-stats</command>
+ daemon include:
+ </para>
+
+ <variablelist>
+
+ <varlistentry>
+ <term>auth.queries.tcp</term>
+ <listitem><simpara>Total count of queries received by the
+ <command>b10-auth</command> server over TCP since startup.
+ </simpara></listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term>auth.queries.udp</term>
+ <listitem><simpara>Total count of queries received by the
+ <command>b10-auth</command> server over UDP since startup.
+ </simpara></listitem>
+ </varlistentry>
+
+ </variablelist>
+
+ </refsect1>
+
+ <refsect1>
<title>FILES</title>
<para>
<filename>/usr/local/var/bind10-devel/zone.sqlite3</filename>
diff --git a/src/bin/auth/benchmarks/Makefile.am b/src/bin/auth/benchmarks/Makefile.am
index d51495b..53c019f 100644
--- a/src/bin/auth/benchmarks/Makefile.am
+++ b/src/bin/auth/benchmarks/Makefile.am
@@ -13,6 +13,12 @@ query_bench_SOURCES += ../auth_srv.h ../auth_srv.cc
query_bench_SOURCES += ../auth_config.h ../auth_config.cc
query_bench_SOURCES += ../statistics.h ../statistics.cc
query_bench_SOURCES += ../auth_log.h ../auth_log.cc
+# This is a temporary workaround for #1206, where the InMemoryClient has been
+# moved to an ldopened library. We could add that library to LDADD, but that
+# is nonportable. When #1207 is done this becomes moot anyway, and the
+# specific workaround is not needed anymore, so we can then remove this
+# line again.
+query_bench_SOURCES += ${top_srcdir}/src/lib/datasrc/memory_datasrc.cc
nodist_query_bench_SOURCES = ../auth_messages.h ../auth_messages.cc
diff --git a/src/bin/auth/query.cc b/src/bin/auth/query.cc
index 3fe03c8..ab6404e 100644
--- a/src/bin/auth/query.cc
+++ b/src/bin/auth/query.cc
@@ -67,20 +67,21 @@ Query::findAddrs(ZoneFinder& zone, const Name& qname,
// Find A rrset
if (qname_ != qname || qtype_ != RRType::A()) {
ZoneFinder::FindResult a_result = zone.find(qname, RRType::A(), NULL,
- options);
+ options | dnssec_opt_);
if (a_result.code == ZoneFinder::SUCCESS) {
response_.addRRset(Message::SECTION_ADDITIONAL,
- boost::const_pointer_cast<RRset>(a_result.rrset));
+ boost::const_pointer_cast<RRset>(a_result.rrset), dnssec_);
}
}
// Find AAAA rrset
if (qname_ != qname || qtype_ != RRType::AAAA()) {
ZoneFinder::FindResult aaaa_result =
- zone.find(qname, RRType::AAAA(), NULL, options);
+ zone.find(qname, RRType::AAAA(), NULL, options | dnssec_opt_);
if (aaaa_result.code == ZoneFinder::SUCCESS) {
response_.addRRset(Message::SECTION_ADDITIONAL,
- boost::const_pointer_cast<RRset>(aaaa_result.rrset));
+ boost::const_pointer_cast<RRset>(aaaa_result.rrset),
+ dnssec_);
}
}
}
@@ -88,7 +89,7 @@ Query::findAddrs(ZoneFinder& zone, const Name& qname,
void
Query::putSOA(ZoneFinder& zone) const {
ZoneFinder::FindResult soa_result(zone.find(zone.getOrigin(),
- RRType::SOA()));
+ RRType::SOA(), NULL, dnssec_opt_));
if (soa_result.code != ZoneFinder::SUCCESS) {
isc_throw(NoSOA, "There's no SOA record in zone " <<
zone.getOrigin().toText());
@@ -99,7 +100,7 @@ Query::putSOA(ZoneFinder& zone) const {
* to insist.
*/
response_.addRRset(Message::SECTION_AUTHORITY,
- boost::const_pointer_cast<RRset>(soa_result.rrset));
+ boost::const_pointer_cast<RRset>(soa_result.rrset), dnssec_);
}
}
@@ -107,14 +108,15 @@ void
Query::getAuthAdditional(ZoneFinder& zone) const {
// Fill in authority and addtional sections.
ZoneFinder::FindResult ns_result = zone.find(zone.getOrigin(),
- RRType::NS());
+ RRType::NS(), NULL,
+ dnssec_opt_);
// zone origin name should have NS records
if (ns_result.code != ZoneFinder::SUCCESS) {
isc_throw(NoApexNS, "There's no apex NS records in zone " <<
zone.getOrigin().toText());
} else {
response_.addRRset(Message::SECTION_AUTHORITY,
- boost::const_pointer_cast<RRset>(ns_result.rrset));
+ boost::const_pointer_cast<RRset>(ns_result.rrset), dnssec_);
// Handle additional for authority section
getAdditional(zone, *ns_result.rrset);
}
@@ -147,12 +149,14 @@ Query::process() const {
keep_doing = false;
std::auto_ptr<RRsetList> target(qtype_is_any ? new RRsetList : NULL);
const ZoneFinder::FindResult db_result(
- result.zone_finder->find(qname_, qtype_, target.get()));
+ result.zone_finder->find(qname_, qtype_, target.get(),
+ dnssec_opt_));
switch (db_result.code) {
case ZoneFinder::DNAME: {
// First, put the dname into the answer
response_.addRRset(Message::SECTION_ANSWER,
- boost::const_pointer_cast<RRset>(db_result.rrset));
+ boost::const_pointer_cast<RRset>(db_result.rrset),
+ dnssec_);
/*
* Empty DNAME should never get in, as it is impossible to
* create one in master file.
@@ -188,7 +192,7 @@ Query::process() const {
qname_.getLabelCount() -
db_result.rrset->getName().getLabelCount()).
concatenate(dname.getDname())));
- response_.addRRset(Message::SECTION_ANSWER, cname);
+ response_.addRRset(Message::SECTION_ANSWER, cname, dnssec_);
break;
}
case ZoneFinder::CNAME:
@@ -202,20 +206,23 @@ Query::process() const {
* So, just put it there.
*/
response_.addRRset(Message::SECTION_ANSWER,
- boost::const_pointer_cast<RRset>(db_result.rrset));
+ boost::const_pointer_cast<RRset>(db_result.rrset),
+ dnssec_);
break;
case ZoneFinder::SUCCESS:
if (qtype_is_any) {
// If query type is ANY, insert all RRs under the domain
// into answer section.
BOOST_FOREACH(RRsetPtr rrset, *target) {
- response_.addRRset(Message::SECTION_ANSWER, rrset);
+ response_.addRRset(Message::SECTION_ANSWER, rrset,
+ dnssec_);
// Handle additional for answer section
getAdditional(*result.zone_finder, *rrset.get());
}
} else {
response_.addRRset(Message::SECTION_ANSWER,
- boost::const_pointer_cast<RRset>(db_result.rrset));
+ boost::const_pointer_cast<RRset>(db_result.rrset),
+ dnssec_);
// Handle additional for answer section
getAdditional(*result.zone_finder, *db_result.rrset);
}
@@ -233,7 +240,8 @@ Query::process() const {
case ZoneFinder::DELEGATION:
response_.setHeaderFlag(Message::HEADERFLAG_AA, false);
response_.addRRset(Message::SECTION_AUTHORITY,
- boost::const_pointer_cast<RRset>(db_result.rrset));
+ boost::const_pointer_cast<RRset>(db_result.rrset),
+ dnssec_);
getAdditional(*result.zone_finder, *db_result.rrset);
break;
case ZoneFinder::NXDOMAIN:
@@ -245,6 +253,13 @@ Query::process() const {
// Just empty answer with SOA in authority section
putSOA(*result.zone_finder);
break;
+ default:
+ // These are new result codes (WILDCARD and WILDCARD_NXRRSET)
+ // They should not happen from the in-memory and the database
+ // backend isn't used yet.
+ // TODO: Implement before letting the database backends in
+ isc_throw(isc::NotImplemented, "Unknown result code");
+ break;
}
}
}
diff --git a/src/bin/auth/query.h b/src/bin/auth/query.h
index 13523e8..0ebbed8 100644
--- a/src/bin/auth/query.h
+++ b/src/bin/auth/query.h
@@ -139,11 +139,15 @@ public:
/// \param qname The query name
/// \param qtype The RR type of the query
/// \param response The response message to store the answer to the query.
+ /// \param dnssec If the answer should include signatures and NSEC/NSEC3 if
+ /// possible.
Query(const isc::datasrc::DataSourceClient& datasrc_client,
const isc::dns::Name& qname, const isc::dns::RRType& qtype,
- isc::dns::Message& response) :
+ isc::dns::Message& response, bool dnssec = false) :
datasrc_client_(datasrc_client), qname_(qname), qtype_(qtype),
- response_(response)
+ response_(response), dnssec_(dnssec),
+ dnssec_opt_(dnssec ? isc::datasrc::ZoneFinder::FIND_DNSSEC :
+ isc::datasrc::ZoneFinder::FIND_DEFAULT)
{}
/// Process the query.
@@ -211,6 +215,8 @@ private:
const isc::dns::Name& qname_;
const isc::dns::RRType& qtype_;
isc::dns::Message& response_;
+ const bool dnssec_;
+ const isc::datasrc::ZoneFinder::FindOptions dnssec_opt_;
};
}
diff --git a/src/bin/auth/statistics.cc b/src/bin/auth/statistics.cc
index 76e5007..e62719f 100644
--- a/src/bin/auth/statistics.cc
+++ b/src/bin/auth/statistics.cc
@@ -37,11 +37,14 @@ public:
void inc(const AuthCounters::CounterType type);
bool submitStatistics() const;
void setStatisticsSession(isc::cc::AbstractSession* statistics_session);
+ void registerStatisticsValidator
+ (AuthCounters::validator_type validator);
// Currently for testing purpose only
uint64_t getCounter(const AuthCounters::CounterType type) const;
private:
std::vector<uint64_t> counters_;
isc::cc::AbstractSession* statistics_session_;
+ AuthCounters::validator_type validator_;
};
AuthCountersImpl::AuthCountersImpl() :
@@ -67,16 +70,25 @@ AuthCountersImpl::submitStatistics() const {
}
std::stringstream statistics_string;
statistics_string << "{\"command\": [\"set\","
- << "{ \"stats_data\": "
- << "{ \"auth.queries.udp\": "
+ << "{ \"owner\": \"Auth\","
+ << " \"data\":"
+ << "{ \"queries.udp\": "
<< counters_.at(AuthCounters::COUNTER_UDP_QUERY)
- << ", \"auth.queries.tcp\": "
+ << ", \"queries.tcp\": "
<< counters_.at(AuthCounters::COUNTER_TCP_QUERY)
<< " }"
<< "}"
<< "]}";
isc::data::ConstElementPtr statistics_element =
isc::data::Element::fromJSON(statistics_string);
+ // validate the statistics data before send
+ if (validator_) {
+ if (!validator_(
+ statistics_element->get("command")->get(1)->get("data"))) {
+ LOG_ERROR(auth_logger, AUTH_INVALID_STATISTICS_DATA);
+ return (false);
+ }
+ }
try {
// group_{send,recv}msg() can throw an exception when encountering
// an error, and group_recvmsg() will throw an exception on timeout.
@@ -105,6 +117,13 @@ AuthCountersImpl::setStatisticsSession
statistics_session_ = statistics_session;
}
+void
+AuthCountersImpl::registerStatisticsValidator
+ (AuthCounters::validator_type validator)
+{
+ validator_ = validator;
+}
+
// Currently for testing purpose only
uint64_t
AuthCountersImpl::getCounter(const AuthCounters::CounterType type) const {
@@ -139,3 +158,10 @@ uint64_t
AuthCounters::getCounter(const AuthCounters::CounterType type) const {
return (impl_->getCounter(type));
}
+
+void
+AuthCounters::registerStatisticsValidator
+ (AuthCounters::validator_type validator) const
+{
+ return (impl_->registerStatisticsValidator(validator));
+}
diff --git a/src/bin/auth/statistics.h b/src/bin/auth/statistics.h
index 5bf6436..c930414 100644
--- a/src/bin/auth/statistics.h
+++ b/src/bin/auth/statistics.h
@@ -131,6 +131,26 @@ public:
/// \return the value of the counter specified by \a type.
///
uint64_t getCounter(const AuthCounters::CounterType type) const;
+
+ /// \brief A type of validation function for the specification in
+ /// isc::config::ModuleSpec.
+ ///
+ /// This type might be useful for not only statistics
+ /// specification but also for config_data specification and for
+ /// command.
+ ///
+ typedef boost::function<bool(const isc::data::ConstElementPtr&)>
+ validator_type;
+
+ /// \brief Register a function type of the statistics validation
+ /// function for AuthCounters.
+ ///
+ /// This method never throws an exception.
+ ///
+ /// \param validator A function type of the validation of
+ /// statistics specification.
+ ///
+ void registerStatisticsValidator(AuthCounters::validator_type validator) const;
};
#endif // __STATISTICS_H
diff --git a/src/bin/auth/tests/Makefile.am b/src/bin/auth/tests/Makefile.am
index 5cd2f5a..d27386e 100644
--- a/src/bin/auth/tests/Makefile.am
+++ b/src/bin/auth/tests/Makefile.am
@@ -37,6 +37,13 @@ run_unittests_SOURCES += query_unittest.cc
run_unittests_SOURCES += change_user_unittest.cc
run_unittests_SOURCES += statistics_unittest.cc
run_unittests_SOURCES += run_unittests.cc
+# This is a temporary workaround for #1206, where the InMemoryClient has been
+# moved to an ldopened library. We could add that library to LDADD, but that
+# is nonportable. When #1207 is done this becomes moot anyway, and the
+# specific workaround is not needed anymore, so we can then remove this
+# line again.
+run_unittests_SOURCES += ${top_srcdir}/src/lib/datasrc/memory_datasrc.cc
+
nodist_run_unittests_SOURCES = ../auth_messages.h ../auth_messages.cc
diff --git a/src/bin/auth/tests/query_unittest.cc b/src/bin/auth/tests/query_unittest.cc
index 68f0a1d..b2d1094 100644
--- a/src/bin/auth/tests/query_unittest.cc
+++ b/src/bin/auth/tests/query_unittest.cc
@@ -111,7 +111,8 @@ public:
dname_name_("dname.example.com"),
has_SOA_(true),
has_apex_NS_(true),
- rrclass_(RRClass::IN())
+ rrclass_(RRClass::IN()),
+ include_rrsig_anyway_(false)
{
stringstream zone_stream;
zone_stream << soa_txt << zone_ns_txt << ns_addrs_txt <<
@@ -137,11 +138,18 @@ public:
// the apex NS.
void setApexNSFlag(bool on) { has_apex_NS_ = on; }
+ // Turn this on if you want it to return RRSIGs regardless of FIND_GLUE_OK
+ void setIncludeRRSIGAnyway(bool on) { include_rrsig_anyway_ = on; }
+
+ Name findPreviousName(const Name&) const {
+ isc_throw(isc::NotImplemented, "Mock doesn't support previous name");
+ }
+
private:
typedef map<RRType, ConstRRsetPtr> RRsetStore;
typedef map<Name, RRsetStore> Domains;
Domains domains_;
- void loadRRset(ConstRRsetPtr rrset) {
+ void loadRRset(RRsetPtr rrset) {
domains_[rrset->getName()][rrset->getType()] = rrset;
if (rrset->getName() == delegation_name_ &&
rrset->getType() == RRType::NS()) {
@@ -149,6 +157,26 @@ private:
} else if (rrset->getName() == dname_name_ &&
rrset->getType() == RRType::DNAME()) {
dname_rrset_ = rrset;
+ // Add some signatures
+ } else if (rrset->getName() == Name("example.com.") &&
+ rrset->getType() == RRType::NS()) {
+ rrset->addRRsig(RdataPtr(new generic::RRSIG("NS 5 3 3600 "
+ "20000101000000 "
+ "20000201000000 "
+ "12345 example.com. "
+ "FAKEFAKEFAKE")));
+ } else if (rrset->getType() == RRType::A()) {
+ rrset->addRRsig(RdataPtr(new generic::RRSIG("A 5 3 3600 "
+ "20000101000000 "
+ "20000201000000 "
+ "12345 example.com. "
+ "FAKEFAKEFAKE")));
+ } else if (rrset->getType() == RRType::AAAA()) {
+ rrset->addRRsig(RdataPtr(new generic::RRSIG("AAAA 5 3 3600 "
+ "20000101000000 "
+ "20000201000000 "
+ "12345 example.com. "
+ "FAKEFAKEFAKE")));
}
}
@@ -161,6 +189,7 @@ private:
ConstRRsetPtr delegation_rrset_;
ConstRRsetPtr dname_rrset_;
const RRClass rrclass_;
+ bool include_rrsig_anyway_;
};
ZoneFinder::FindResult
@@ -195,7 +224,26 @@ MockZoneFinder::find(const Name& name, const RRType& type,
RRsetStore::const_iterator found_rrset =
found_domain->second.find(type);
if (found_rrset != found_domain->second.end()) {
- return (FindResult(SUCCESS, found_rrset->second));
+ ConstRRsetPtr rrset;
+ // Strip whatever signature there is in case DNSSEC is not required
+ // Just to make sure the Query asks for it when it is needed
+ if (options & ZoneFinder::FIND_DNSSEC ||
+ include_rrsig_anyway_ ||
+ !found_rrset->second->getRRsig()) {
+ rrset = found_rrset->second;
+ } else {
+ RRsetPtr noconst(new RRset(found_rrset->second->getName(),
+ found_rrset->second->getClass(),
+ found_rrset->second->getType(),
+ found_rrset->second->getTTL()));
+ for (RdataIteratorPtr
+ i(found_rrset->second->getRdataIterator());
+ !i->isLast(); i->next()) {
+ noconst->addRdata(i->getCurrent());
+ }
+ rrset = noconst;
+ }
+ return (FindResult(SUCCESS, rrset));
}
// If not found but we have a target, fill it with all RRsets here
@@ -304,6 +352,58 @@ TEST_F(QueryTest, exactMatch) {
www_a_txt, zone_ns_txt, ns_addrs_txt);
}
+TEST_F(QueryTest, exactMatchIgnoreSIG) {
+ // Check that we do not include the RRSIG when not requested even when
+ // we receive it from the data source.
+ mock_finder->setIncludeRRSIGAnyway(true);
+ Query query(memory_client, qname, qtype, response);
+ EXPECT_NO_THROW(query.process());
+ // find match rrset
+ responseCheck(response, Rcode::NOERROR(), AA_FLAG, 1, 3, 3,
+ www_a_txt, zone_ns_txt, ns_addrs_txt);
+}
+
+TEST_F(QueryTest, dnssecPositive) {
+ // Just like exactMatch, but the signatures should be included as well
+ Query query(memory_client, qname, qtype, response, true);
+ EXPECT_NO_THROW(query.process());
+ // find match rrset
+ // We can't let responseCheck to check the additional section as well,
+ // it gets confused by the two RRs for glue.delegation.../RRSIG due
+ // to it's design and fixing it would be hard. Therefore we simply
+ // check manually this one time.
+ responseCheck(response, Rcode::NOERROR(), AA_FLAG, 2, 4, 6,
+ (www_a_txt + std::string("www.example.com. 3600 IN RRSIG "
+ "A 5 3 3600 20000101000000 "
+ "20000201000000 12345 example.com. "
+ "FAKEFAKEFAKE\n")).c_str(),
+ (zone_ns_txt + std::string("example.com. 3600 IN RRSIG NS 5 "
+ "3 3600 20000101000000 "
+ "20000201000000 12345 "
+ "example.com. FAKEFAKEFAKE\n")).
+ c_str(), NULL);
+ RRsetIterator iterator(response.beginSection(Message::SECTION_ADDITIONAL));
+ const char* additional[] = {
+ "glue.delegation.example.com. 3600 IN A 192.0.2.153\n",
+ "glue.delegation.example.com. 3600 IN RRSIG A 5 3 3600 20000101000000 "
+ "20000201000000 12345 example.com. FAKEFAKEFAKE\n",
+ "glue.delegation.example.com. 3600 IN AAAA 2001:db8::53\n",
+ "glue.delegation.example.com. 3600 IN RRSIG AAAA 5 3 3600 "
+ "20000101000000 20000201000000 12345 example.com. FAKEFAKEFAKE\n",
+ "noglue.example.com. 3600 IN A 192.0.2.53\n",
+ "noglue.example.com. 3600 IN RRSIG A 5 3 3600 20000101000000 "
+ "20000201000000 12345 example.com. FAKEFAKEFAKE\n",
+ NULL
+ };
+ for (const char** rr(additional); *rr != NULL; ++ rr) {
+ ASSERT_FALSE(iterator ==
+ response.endSection(Message::SECTION_ADDITIONAL));
+ EXPECT_EQ(*rr, (*iterator)->toText());
+ iterator ++;
+ }
+ EXPECT_TRUE(iterator == response.endSection(Message::SECTION_ADDITIONAL));
+}
+
TEST_F(QueryTest, exactAddrMatch) {
// find match rrset, omit additional data which has already been provided
// in the answer section from the additional.
diff --git a/src/bin/auth/tests/statistics_unittest.cc b/src/bin/auth/tests/statistics_unittest.cc
index 9a3dded..98e573b 100644
--- a/src/bin/auth/tests/statistics_unittest.cc
+++ b/src/bin/auth/tests/statistics_unittest.cc
@@ -16,6 +16,8 @@
#include <gtest/gtest.h>
+#include <boost/bind.hpp>
+
#include <cc/data.h>
#include <cc/session.h>
@@ -76,6 +78,13 @@ protected:
}
MockSession statistics_session_;
AuthCounters counters;
+ // no need to be inherited from the original class here.
+ class MockModuleSpec {
+ public:
+ bool validateStatistics(ConstElementPtr, const bool valid) const
+ { return (valid); }
+ };
+ MockModuleSpec module_spec_;
};
void
@@ -181,7 +190,7 @@ TEST_F(AuthCountersTest, submitStatisticsWithException) {
statistics_session_.setThrowSessionTimeout(false);
}
-TEST_F(AuthCountersTest, submitStatistics) {
+TEST_F(AuthCountersTest, submitStatisticsWithoutValidator) {
// Submit statistics data.
// Validate if it submits correct data.
@@ -201,12 +210,69 @@ TEST_F(AuthCountersTest, submitStatistics) {
// Command is "set".
EXPECT_EQ("set", statistics_session_.sent_msg->get("command")
->get(0)->stringValue());
+ EXPECT_EQ("Auth", statistics_session_.sent_msg->get("command")
+ ->get(1)->get("owner")->stringValue());
ConstElementPtr statistics_data = statistics_session_.sent_msg
->get("command")->get(1)
- ->get("stats_data");
+ ->get("data");
// UDP query counter is 2 and TCP query counter is 1.
- EXPECT_EQ(2, statistics_data->get("auth.queries.udp")->intValue());
- EXPECT_EQ(1, statistics_data->get("auth.queries.tcp")->intValue());
+ EXPECT_EQ(2, statistics_data->get("queries.udp")->intValue());
+ EXPECT_EQ(1, statistics_data->get("queries.tcp")->intValue());
}
+TEST_F(AuthCountersTest, submitStatisticsWithValidator) {
+
+ //a validator for the unittest
+ AuthCounters::validator_type validator;
+ ConstElementPtr el;
+
+ // Submit statistics data with correct statistics validator.
+ validator = boost::bind(
+ &AuthCountersTest::MockModuleSpec::validateStatistics,
+ &module_spec_, _1, true);
+
+ EXPECT_TRUE(validator(el));
+
+ // register validator to AuthCounters
+ counters.registerStatisticsValidator(validator);
+
+ // Counters should be initialized to 0.
+ EXPECT_EQ(0, counters.getCounter(AuthCounters::COUNTER_UDP_QUERY));
+ EXPECT_EQ(0, counters.getCounter(AuthCounters::COUNTER_TCP_QUERY));
+
+ // UDP query counter is set to 2.
+ counters.inc(AuthCounters::COUNTER_UDP_QUERY);
+ counters.inc(AuthCounters::COUNTER_UDP_QUERY);
+ // TCP query counter is set to 1.
+ counters.inc(AuthCounters::COUNTER_TCP_QUERY);
+
+ // checks the value returned by submitStatistics
+ EXPECT_TRUE(counters.submitStatistics());
+
+ // Destination is "Stats".
+ EXPECT_EQ("Stats", statistics_session_.msg_destination);
+ // Command is "set".
+ EXPECT_EQ("set", statistics_session_.sent_msg->get("command")
+ ->get(0)->stringValue());
+ EXPECT_EQ("Auth", statistics_session_.sent_msg->get("command")
+ ->get(1)->get("owner")->stringValue());
+ ConstElementPtr statistics_data = statistics_session_.sent_msg
+ ->get("command")->get(1)
+ ->get("data");
+ // UDP query counter is 2 and TCP query counter is 1.
+ EXPECT_EQ(2, statistics_data->get("queries.udp")->intValue());
+ EXPECT_EQ(1, statistics_data->get("queries.tcp")->intValue());
+
+ // Submit statistics data with incorrect statistics validator.
+ validator = boost::bind(
+ &AuthCountersTest::MockModuleSpec::validateStatistics,
+ &module_spec_, _1, false);
+
+ EXPECT_FALSE(validator(el));
+
+ counters.registerStatisticsValidator(validator);
+
+ // checks the value returned by submitStatistics
+ EXPECT_FALSE(counters.submitStatistics());
+}
}
diff --git a/src/bin/bind10/Makefile.am b/src/bin/bind10/Makefile.am
index 6ab88d8..5ec0c9f 100644
--- a/src/bin/bind10/Makefile.am
+++ b/src/bin/bind10/Makefile.am
@@ -1,10 +1,16 @@
SUBDIRS = . tests
sbin_SCRIPTS = bind10
-CLEANFILES = bind10 bind10_src.pyc bind10_messages.py bind10_messages.pyc
+CLEANFILES = bind10 bind10_src.pyc
+CLEANFILES += $(PYTHON_LOGMSGPKG_DIR)/work/bind10_messages.py
+CLEANFILES += $(PYTHON_LOGMSGPKG_DIR)/work/bind10_messages.pyc
pkglibexecdir = $(libexecdir)/@PACKAGE@
-pyexec_DATA = bind10_messages.py
+
+nodist_pylogmessage_PYTHON = $(PYTHON_LOGMSGPKG_DIR)/work/bind10_messages.py
+pylogmessagedir = $(pyexecdir)/isc/log_messages/
+
+noinst_SCRIPTS = run_bind10.sh
bind10dir = $(pkgdatadir)
bind10_DATA = bob.spec
@@ -20,11 +26,12 @@ bind10.8: bind10.xml
endif
-bind10_messages.py: bind10_messages.mes
- $(top_builddir)/src/lib/log/compiler/message -p $(top_srcdir)/src/bin/bind10/bind10_messages.mes
+$(PYTHON_LOGMSGPKG_DIR)/work/bind10_messages.py : bind10_messages.mes
+ $(top_builddir)/src/lib/log/compiler/message \
+ -d $(PYTHON_LOGMSGPKG_DIR)/work -p $(srcdir)/bind10_messages.mes
# this is done here since configure.ac AC_OUTPUT doesn't expand exec_prefix
-bind10: bind10_src.py
+bind10: bind10_src.py $(PYTHON_LOGMSGPKG_DIR)/work/bind10_messages.py
$(SED) -e "s|@@PYTHONPATH@@|@pyexecdir@|" \
-e "s|@@LIBEXECDIR@@|$(pkglibexecdir)|" bind10_src.py >$@
chmod a+x $@
diff --git a/src/bin/bind10/bind10.8 b/src/bin/bind10/bind10.8
index d5ab905..1af4f14 100644
--- a/src/bin/bind10/bind10.8
+++ b/src/bin/bind10/bind10.8
@@ -2,12 +2,12 @@
.\" Title: bind10
.\" Author: [see the "AUTHORS" section]
.\" Generator: DocBook XSL Stylesheets v1.75.2 <http://docbook.sf.net/>
-.\" Date: March 31, 2011
+.\" Date: August 11, 2011
.\" Manual: BIND10
.\" Source: BIND10
.\" Language: English
.\"
-.TH "BIND10" "8" "March 31, 2011" "BIND10" "BIND10"
+.TH "BIND10" "8" "August 11, 2011" "BIND10" "BIND10"
.\" -----------------------------------------------------------------
.\" * set default formatting
.\" -----------------------------------------------------------------
@@ -107,6 +107,18 @@ Display more about what is going on for
\fBbind10\fR
and its child processes\&.
.RE
+.SH "STATISTICS DATA"
+.PP
+The statistics data collected by the
+\fBb10\-stats\fR
+daemon include:
+.PP
+bind10\&.boot_time
+.RS 4
+The date and time that the
+\fBbind10\fR
+process started\&. This is represented in ISO 8601 format\&.
+.RE
.SH "SEE ALSO"
.PP
diff --git a/src/bin/bind10/bind10.xml b/src/bin/bind10/bind10.xml
index 1128264..b101ba8 100644
--- a/src/bin/bind10/bind10.xml
+++ b/src/bin/bind10/bind10.xml
@@ -2,7 +2,7 @@
"http://www.oasis-open.org/docbook/xml/4.2/docbookx.dtd"
[<!ENTITY mdash "—">]>
<!--
- - Copyright (C) 2010 Internet Systems Consortium, Inc. ("ISC")
+ - Copyright (C) 2010-2011 Internet Systems Consortium, Inc. ("ISC")
-
- Permission to use, copy, modify, and/or distribute this software for any
- purpose with or without fee is hereby granted, provided that the above
@@ -20,7 +20,7 @@
<refentry>
<refentryinfo>
- <date>March 31, 2011</date>
+ <date>August 11, 2011</date>
</refentryinfo>
<refmeta>
@@ -217,6 +217,30 @@ The default is the basename of ARG 0.
<!--
TODO: configuration section
-->
+
+ <refsect1>
+ <title>STATISTICS DATA</title>
+
+ <para>
+ The statistics data collected by the <command>b10-stats</command>
+ daemon include:
+ </para>
+
+ <variablelist>
+
+ <varlistentry>
+ <term>bind10.boot_time</term>
+ <listitem><para>
+ The date and time that the <command>bind10</command>
+ process started.
+ This is represented in ISO 8601 format.
+ </para></listitem>
+ </varlistentry>
+
+ </variablelist>
+
+ </refsect1>
+
<!--
<refsect1>
<title>FILES</title>
diff --git a/src/bin/bind10/bind10_messages.mes b/src/bin/bind10/bind10_messages.mes
index 4bac069..4debcdb 100644
--- a/src/bin/bind10/bind10_messages.mes
+++ b/src/bin/bind10/bind10_messages.mes
@@ -198,3 +198,7 @@ the message channel.
% BIND10_UNKNOWN_CHILD_PROCESS_ENDED unknown child pid %1 exited
An unknown child process has exited. The PID is printed, but no further
action will be taken by the boss process.
+
+% BIND10_INVALID_STATISTICS_DATA invalid specification of statistics data specified
+An error was encountered when the boss module specified
+statistics data which is invalid for the boss specification file.
diff --git a/src/bin/bind10/bind10_src.py.in b/src/bin/bind10/bind10_src.py.in
index b497f7c..1687cb1 100755
--- a/src/bin/bind10/bind10_src.py.in
+++ b/src/bin/bind10/bind10_src.py.in
@@ -66,7 +66,7 @@ import isc.cc
import isc.util.process
import isc.net.parse
import isc.log
-from bind10_messages import *
+from isc.log_messages.bind10_messages import *
import isc.bind10.sockcreator
isc.log.init("b10-boss")
@@ -85,7 +85,7 @@ isc.util.process.rename(sys.argv[0])
# number, and the overall BIND 10 version number (set in configure.ac).
VERSION = "bind10 20110223 (BIND 10 @PACKAGE_VERSION@)"
-# This is for bind10.boottime of stats module
+# This is for boot_time of Boss
_BASETIME = time.gmtime()
class RestartSchedule:
@@ -307,6 +307,13 @@ class BoB:
process_list.append([pid, self.processes[pid].name])
return process_list
+ def _get_stats_data(self):
+ return { "owner": "Boss",
+ "data": { 'boot_time':
+ time.strftime('%Y-%m-%dT%H:%M:%SZ', _BASETIME)
+ }
+ }
+
def command_handler(self, command, args):
logger.debug(DBG_COMMANDS, BIND10_RECEIVED_COMMAND, command)
answer = isc.config.ccsession.create_answer(1, "command not implemented")
@@ -316,15 +323,26 @@ class BoB:
if command == "shutdown":
self.runnable = False
answer = isc.config.ccsession.create_answer(0)
+ elif command == "getstats":
+ answer = isc.config.ccsession.create_answer(0, self._get_stats_data())
elif command == "sendstats":
# send statistics data to the stats daemon immediately
- cmd = isc.config.ccsession.create_command(
- 'set', { "stats_data": {
- 'bind10.boot_time': time.strftime('%Y-%m-%dT%H:%M:%SZ', _BASETIME)
- }})
- seq = self.cc_session.group_sendmsg(cmd, 'Stats')
- self.cc_session.group_recvmsg(True, seq)
- answer = isc.config.ccsession.create_answer(0)
+ stats_data = self._get_stats_data()
+ valid = self.ccs.get_module_spec().validate_statistics(
+ True, stats_data["data"])
+ if valid:
+ cmd = isc.config.ccsession.create_command('set', stats_data)
+ seq = self.cc_session.group_sendmsg(cmd, 'Stats')
+ # Consume the answer, in case it becomes a orphan message.
+ try:
+ self.cc_session.group_recvmsg(False, seq)
+ except isc.cc.session.SessionTimeout:
+ pass
+ answer = isc.config.ccsession.create_answer(0)
+ else:
+ logger.fatal(BIND10_INVALID_STATISTICS_DATA);
+ answer = isc.config.ccsession.create_answer(
+ 1, "specified statistics data is invalid")
elif command == "ping":
answer = isc.config.ccsession.create_answer(0, "pong")
elif command == "show_processes":
diff --git a/src/bin/bind10/run_bind10.sh.in b/src/bin/bind10/run_bind10.sh.in
index b5b9721..50e6e29 100755
--- a/src/bin/bind10/run_bind10.sh.in
+++ b/src/bin/bind10/run_bind10.sh.in
@@ -23,14 +23,14 @@ BIND10_PATH=@abs_top_builddir@/src/bin/bind10
PATH=@abs_top_builddir@/src/bin/msgq:@abs_top_builddir@/src/bin/auth:@abs_top_builddir@/src/bin/resolver:@abs_top_builddir@/src/bin/cfgmgr:@abs_top_builddir@/src/bin/cmdctl:@abs_top_builddir@/src/bin/stats:@abs_top_builddir@/src/bin/xfrin:@abs_top_builddir@/src/bin/xfrout:@abs_top_builddir@/src/bin/zonemgr:@abs_top_builddir@/src/bin/dhcp6:@abs_top_builddir@/src/bin/sockcreator:$PATH
export PATH
-PYTHONPATH=@abs_top_builddir@/src/lib/python:@abs_top_builddir@/src/lib/dns/python/.libs:@abs_top_builddir@/src/lib/xfr/.libs:@abs_top_builddir@/src/lib/log/.libs:@abs_top_builddir@/src/lib/util/io/.libs:@abs_top_builddir@/src/lib/python/isc/config:@abs_top_builddir@/src/lib/python/isc/acl/.libs:
+PYTHONPATH=@abs_top_builddir@/src/lib/python/isc/log_messages:@abs_top_builddir@/src/lib/python:@abs_top_builddir@/src/lib/dns/python/.libs:@abs_top_builddir@/src/lib/xfr/.libs:@abs_top_builddir@/src/lib/log/.libs:@abs_top_builddir@/src/lib/util/io/.libs:@abs_top_builddir@/src/lib/python/isc/config:@abs_top_builddir@/src/lib/python/isc/acl/.libs:@abs_top_builddir@/src/lib/python/isc/datasrc/.libs
export PYTHONPATH
# If necessary (rare cases), explicitly specify paths to dynamic libraries
# required by loadable python modules.
SET_ENV_LIBRARY_PATH=@SET_ENV_LIBRARY_PATH@
if test $SET_ENV_LIBRARY_PATH = yes; then
- @ENV_LIBRARY_PATH@=@abs_top_builddir@/src/lib/dns/.libs:@abs_top_builddir@/src/lib/cryptolink/.libs:@abs_top_builddir@/src/lib/cc/.libs:@abs_top_builddir@/src/lib/config/.libs:@abs_top_builddir@/src/lib/log/.libs:@abs_top_builddir@/src/lib/acl/.libs:@abs_top_builddir@/src/lib/util/.libs:@abs_top_builddir@/src/lib/util/io/.libs:@abs_top_builddir@/src/lib/exceptions/.libs:$@ENV_LIBRARY_PATH@
+ @ENV_LIBRARY_PATH@=@abs_top_builddir@/src/lib/dns/.libs:@abs_top_builddir@/src/lib/dns/python/.libs:@abs_top_builddir@/src/lib/cryptolink/.libs:@abs_top_builddir@/src/lib/cc/.libs:@abs_top_builddir@/src/lib/config/.libs:@abs_top_builddir@/src/lib/log/.libs:@abs_top_builddir@/src/lib/acl/.libs:@abs_top_builddir@/src/lib/util/.libs:@abs_top_builddir@/src/lib/util/io/.libs:@abs_top_builddir@/src/lib/exceptions/.libs:@abs_top_builddir@/src/lib/datasrc/.libs:$@ENV_LIBRARY_PATH@
export @ENV_LIBRARY_PATH@
fi
diff --git a/src/bin/bind10/tests/Makefile.am b/src/bin/bind10/tests/Makefile.am
index d9e012f..d54ee56 100644
--- a/src/bin/bind10/tests/Makefile.am
+++ b/src/bin/bind10/tests/Makefile.am
@@ -2,12 +2,13 @@ PYCOVERAGE_RUN = @PYCOVERAGE_RUN@
#PYTESTS = args_test.py bind10_test.py
# NOTE: this has a generated test found in the builddir
PYTESTS = bind10_test.py
+noinst_SCRIPTS = $(PYTESTS)
# If necessary (rare cases), explicitly specify paths to dynamic libraries
# required by loadable python modules.
LIBRARY_PATH_PLACEHOLDER =
if SET_ENV_LIBRARY_PATH
-LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/util/io/.libs:$$$(ENV_LIBRARY_PATH)
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/util/io/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
endif
# test using command-line arguments, so use check-local target instead of TESTS
@@ -19,8 +20,9 @@ if ENABLE_PYTHON_COVERAGE
endif
for pytest in $(PYTESTS) ; do \
echo Running test: $$pytest ; \
+ chmod +x $(abs_builddir)/$$pytest ; \
$(LIBRARY_PATH_PLACEHOLDER) \
- env PYTHONPATH=$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python:$(abs_top_srcdir)/src/bin:$(abs_top_builddir)/src/bin/bind10:$(abs_top_builddir)/src/lib/util/io/.libs \
+ PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_srcdir)/src/bin:$(abs_top_builddir)/src/bin/bind10:$(abs_top_builddir)/src/lib/util/io/.libs \
BIND10_MSGQ_SOCKET_FILE=$(abs_top_builddir)/msgq_socket \
$(PYCOVERAGE_RUN) $(abs_builddir)/$$pytest || exit ; \
done
diff --git a/src/bin/bind10/tests/bind10_test.py.in b/src/bin/bind10/tests/bind10_test.py.in
index 077190c..2efd940 100644
--- a/src/bin/bind10/tests/bind10_test.py.in
+++ b/src/bin/bind10/tests/bind10_test.py.in
@@ -137,9 +137,27 @@ class TestBoB(unittest.TestCase):
def group_sendmsg(self, msg, group):
(self.msg, self.group) = (msg, group)
def group_recvmsg(self, nonblock, seq): pass
+ class DummyModuleCCSession():
+ module_spec = isc.config.module_spec.ModuleSpec({
+ "module_name": "Boss",
+ "statistics": [
+ {
+ "item_name": "boot_time",
+ "item_type": "string",
+ "item_optional": False,
+ "item_default": "1970-01-01T00:00:00Z",
+ "item_title": "Boot time",
+ "item_description": "A date time when bind10 process starts initially",
+ "item_format": "date-time"
+ }
+ ]
+ })
+ def get_module_spec(self):
+ return self.module_spec
bob = BoB()
bob.verbose = True
bob.cc_session = DummySession()
+ bob.ccs = DummyModuleCCSession()
# a bad command
self.assertEqual(bob.command_handler(-1, None),
isc.config.ccsession.create_answer(1, "bad command"))
@@ -147,14 +165,22 @@ class TestBoB(unittest.TestCase):
self.assertEqual(bob.command_handler("shutdown", None),
isc.config.ccsession.create_answer(0))
self.assertFalse(bob.runnable)
+ # "getstats" command
+ self.assertEqual(bob.command_handler("getstats", None),
+ isc.config.ccsession.create_answer(0,
+ { "owner": "Boss",
+ "data": {
+ 'boot_time': time.strftime('%Y-%m-%dT%H:%M:%SZ', _BASETIME)
+ }}))
# "sendstats" command
self.assertEqual(bob.command_handler("sendstats", None),
isc.config.ccsession.create_answer(0))
self.assertEqual(bob.cc_session.group, "Stats")
self.assertEqual(bob.cc_session.msg,
isc.config.ccsession.create_command(
- 'set', { "stats_data": {
- 'bind10.boot_time': time.strftime('%Y-%m-%dT%H:%M:%SZ', _BASETIME)
+ "set", { "owner": "Boss",
+ "data": {
+ "boot_time": time.strftime("%Y-%m-%dT%H:%M:%SZ", _BASETIME)
}}))
# "ping" command
self.assertEqual(bob.command_handler("ping", None),
diff --git a/src/bin/bindctl/Makefile.am b/src/bin/bindctl/Makefile.am
index cd8bcb3..700f26e 100644
--- a/src/bin/bindctl/Makefile.am
+++ b/src/bin/bindctl/Makefile.am
@@ -5,6 +5,8 @@ man_MANS = bindctl.1
EXTRA_DIST = $(man_MANS) bindctl.xml
+noinst_SCRIPTS = run_bindctl.sh
+
python_PYTHON = __init__.py bindcmd.py cmdparse.py exception.py moduleinfo.py \
mycollections.py
pythondir = $(pyexecdir)/bindctl
diff --git a/src/bin/bindctl/run_bindctl.sh.in b/src/bin/bindctl/run_bindctl.sh.in
index 8f6ba59..f4cc40c 100755
--- a/src/bin/bindctl/run_bindctl.sh.in
+++ b/src/bin/bindctl/run_bindctl.sh.in
@@ -20,14 +20,14 @@ export PYTHON_EXEC
BINDCTL_PATH=@abs_top_builddir@/src/bin/bindctl
-PYTHONPATH=@abs_top_srcdir@/src/bin:@abs_top_builddir@/src/lib/python:@abs_top_builddir@/src/bin:@abs_top_srcdir@/src/lib/python
+PYTHONPATH=@abs_top_srcdir@/src/bin:@abs_top_builddir@/src/lib/python/isc/log_messages:@abs_top_builddir@/src/lib/python:@abs_top_builddir@/src/bin:@abs_top_srcdir@/src/lib/python
export PYTHONPATH
# If necessary (rare cases), explicitly specify paths to dynamic libraries
# required by loadable python modules.
SET_ENV_LIBRARY_PATH=@SET_ENV_LIBRARY_PATH@
if test $SET_ENV_LIBRARY_PATH = yes; then
- @ENV_LIBRARY_PATH@=@abs_top_builddir@/src/lib/dns/.libs:@abs_top_builddir@/src/lib/cryptolink/.libs:@abs_top_builddir@/src/lib/cc/.libs:@abs_top_builddir@/src/lib/config/.libs:@abs_top_builddir@/src/lib/log/.libs:@abs_top_builddir@/src/lib/util/.libs:@abs_top_builddir@/src/lib/util/io/.libs:@abs_top_builddir@/src/lib/exceptions/.libs:$@ENV_LIBRARY_PATH@
+ @ENV_LIBRARY_PATH@=@abs_top_builddir@/src/lib/dns/.libs:@abs_top_builddir@/src/lib/dns/python/.libs:@abs_top_builddir@/src/lib/cryptolink/.libs:@abs_top_builddir@/src/lib/cc/.libs:@abs_top_builddir@/src/lib/config/.libs:@abs_top_builddir@/src/lib/log/.libs:@abs_top_builddir@/src/lib/util/.libs:@abs_top_builddir@/src/lib/util/io/.libs:@abs_top_builddir@/src/lib/exceptions/.libs:@abs_top_builddir@/src/lib/datasrc/.libs:$@ENV_LIBRARY_PATH@
export @ENV_LIBRARY_PATH@
fi
diff --git a/src/bin/bindctl/tests/Makefile.am b/src/bin/bindctl/tests/Makefile.am
index 891d413..3d08a17 100644
--- a/src/bin/bindctl/tests/Makefile.am
+++ b/src/bin/bindctl/tests/Makefile.am
@@ -6,7 +6,7 @@ EXTRA_DIST = $(PYTESTS)
# required by loadable python modules.
LIBRARY_PATH_PLACEHOLDER =
if SET_ENV_LIBRARY_PATH
-LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$$$(ENV_LIBRARY_PATH)
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/util/io/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
endif
# test using command-line arguments, so use check-local target instead of TESTS
@@ -19,6 +19,6 @@ endif
for pytest in $(PYTESTS) ; do \
echo Running test: $$pytest ; \
$(LIBRARY_PATH_PLACEHOLDER) \
- env PYTHONPATH=$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python:$(abs_top_builddir)/src/bin/bindctl:$(abs_top_srcdir)/src/bin \
+ PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_builddir)/src/bin/bindctl:$(abs_top_srcdir)/src/bin \
$(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
done
diff --git a/src/bin/cfgmgr/b10-cfgmgr.py.in b/src/bin/cfgmgr/b10-cfgmgr.py.in
index 8befbdf..2ccc430 100755
--- a/src/bin/cfgmgr/b10-cfgmgr.py.in
+++ b/src/bin/cfgmgr/b10-cfgmgr.py.in
@@ -28,7 +28,7 @@ import os.path
import isc.log
isc.log.init("b10-cfgmgr")
from isc.config.cfgmgr import ConfigManager, ConfigManagerDataReadError, logger
-from cfgmgr_messages import *
+from isc.log_messages.cfgmgr_messages import *
isc.util.process.rename()
diff --git a/src/bin/cfgmgr/plugins/tests/Makefile.am b/src/bin/cfgmgr/plugins/tests/Makefile.am
index 07b7a85..ffea2d7 100644
--- a/src/bin/cfgmgr/plugins/tests/Makefile.am
+++ b/src/bin/cfgmgr/plugins/tests/Makefile.am
@@ -7,7 +7,7 @@ EXTRA_DIST = $(PYTESTS)
# required by loadable python modules.
LIBRARY_PATH_PLACEHOLDER =
if SET_ENV_LIBRARY_PATH
-LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/util/io/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
endif
# test using command-line arguments, so use check-local target instead of TESTS
@@ -19,8 +19,8 @@ if ENABLE_PYTHON_COVERAGE
endif
for pytest in $(PYTESTS) ; do \
echo Running test: $$pytest ; \
- env B10_TEST_PLUGIN_DIR=$(abs_srcdir)/..:$(abs_builddir)/.. \
- env PYTHONPATH=$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python:$(abs_top_builddir)/src/bin/cfgmgr:$(abs_top_builddir)/src/lib/dns/python/.libs \
+ B10_TEST_PLUGIN_DIR=$(abs_srcdir)/..:$(abs_builddir)/.. \
+ PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_builddir)/src/bin/cfgmgr:$(abs_top_builddir)/src/lib/dns/python/.libs \
$(LIBRARY_PATH_PLACEHOLDER) \
$(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
done
diff --git a/src/bin/cfgmgr/tests/Makefile.am b/src/bin/cfgmgr/tests/Makefile.am
index bd67241..a2e43ff 100644
--- a/src/bin/cfgmgr/tests/Makefile.am
+++ b/src/bin/cfgmgr/tests/Makefile.am
@@ -1,13 +1,14 @@
PYCOVERAGE_RUN = @PYCOVERAGE_RUN@
PYTESTS = b10-cfgmgr_test.py
-EXTRA_DIST = $(PYTESTS) testdata/plugins/testplugin.py
+noinst_SCRIPTS = $(PYTESTS)
+EXTRA_DIST = testdata/plugins/testplugin.py
# If necessary (rare cases), explicitly specify paths to dynamic libraries
# required by loadable python modules.
LIBRARY_PATH_PLACEHOLDER =
if SET_ENV_LIBRARY_PATH
-LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$$$(ENV_LIBRARY_PATH)
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/util/io/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
endif
# test using command-line arguments, so use check-local target instead of TESTS
@@ -19,9 +20,10 @@ if ENABLE_PYTHON_COVERAGE
endif
for pytest in $(PYTESTS) ; do \
echo Running test: $$pytest ; \
- env TESTDATA_PATH=$(abs_srcdir)/testdata \
+ chmod +x $(abs_builddir)/$$pytest ; \
+ TESTDATA_PATH=$(abs_srcdir)/testdata \
$(LIBRARY_PATH_PLACEHOLDER) \
- env PYTHONPATH=$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python:$(abs_top_builddir)/src/bin/cfgmgr:$(abs_top_builddir)/src/lib/python/isc/config \
+ PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_builddir)/src/bin/cfgmgr:$(abs_top_builddir)/src/lib/python/isc/config \
$(PYCOVERAGE_RUN) $(abs_builddir)/$$pytest || exit ; \
done
diff --git a/src/bin/cmdctl/Makefile.am b/src/bin/cmdctl/Makefile.am
index fcd23f8..e302fa6 100644
--- a/src/bin/cmdctl/Makefile.am
+++ b/src/bin/cmdctl/Makefile.am
@@ -3,7 +3,9 @@ SUBDIRS = . tests
pkglibexecdir = $(libexecdir)/@PACKAGE@
pkglibexec_SCRIPTS = b10-cmdctl
-pyexec_DATA = cmdctl_messages.py
+
+nodist_pylogmessage_PYTHON = $(PYTHON_LOGMSGPKG_DIR)/work/cmdctl_messages.py
+pylogmessagedir = $(pyexecdir)/isc/log_messages/
b10_cmdctldir = $(pkgdatadir)
@@ -19,7 +21,9 @@ b10_cmdctl_DATA += cmdctl.spec
EXTRA_DIST = $(CMDCTL_CONFIGURATIONS)
-CLEANFILES= b10-cmdctl cmdctl.pyc cmdctl.spec cmdctl_messages.py cmdctl_messages.pyc
+CLEANFILES= b10-cmdctl cmdctl.pyc cmdctl.spec
+CLEANFILES += $(PYTHON_LOGMSGPKG_DIR)/work/cmdctl_messages.py
+CLEANFILES += $(PYTHON_LOGMSGPKG_DIR)/work/cmdctl_messages.pyc
man_MANS = b10-cmdctl.8
EXTRA_DIST += $(man_MANS) b10-cmdctl.xml cmdctl_messages.mes
@@ -34,11 +38,12 @@ endif
cmdctl.spec: cmdctl.spec.pre
$(SED) -e "s|@@SYSCONFDIR@@|$(sysconfdir)|" cmdctl.spec.pre >$@
-cmdctl_messages.py: cmdctl_messages.mes
- $(top_builddir)/src/lib/log/compiler/message -p $(top_srcdir)/src/bin/cmdctl/cmdctl_messages.mes
+$(PYTHON_LOGMSGPKG_DIR)/work/cmdctl_messages.py : cmdctl_messages.mes
+ $(top_builddir)/src/lib/log/compiler/message \
+ -d $(PYTHON_LOGMSGPKG_DIR)/work -p $(srcdir)/cmdctl_messages.mes
# this is done here since configure.ac AC_OUTPUT doesn't expand exec_prefix
-b10-cmdctl: cmdctl.py cmdctl_messages.py
+b10-cmdctl: cmdctl.py $(PYTHON_LOGMSGPKG_DIR)/work/cmdctl_messages.py
$(SED) "s|@@PYTHONPATH@@|@pyexecdir@|" cmdctl.py >$@
chmod a+x $@
diff --git a/src/bin/cmdctl/cmdctl.py.in b/src/bin/cmdctl/cmdctl.py.in
index 2f89894..fcd69b8 100755
--- a/src/bin/cmdctl/cmdctl.py.in
+++ b/src/bin/cmdctl/cmdctl.py.in
@@ -47,7 +47,7 @@ import isc.net.parse
from optparse import OptionParser, OptionValueError
from hashlib import sha1
from isc.util import socketserver_mixin
-from cmdctl_messages import *
+from isc.log_messages.cmdctl_messages import *
# TODO: these debug-levels are hard-coded here; we are planning on
# creating a general set of debug levels, see ticket #1074. When done,
diff --git a/src/bin/cmdctl/run_b10-cmdctl.sh.in b/src/bin/cmdctl/run_b10-cmdctl.sh.in
index 6a519e1..7e63249 100644
--- a/src/bin/cmdctl/run_b10-cmdctl.sh.in
+++ b/src/bin/cmdctl/run_b10-cmdctl.sh.in
@@ -19,9 +19,17 @@ PYTHON_EXEC=${PYTHON_EXEC:-@PYTHON@}
export PYTHON_EXEC
CMD_CTRLD_PATH=@abs_top_builddir@/src/bin/cmdctl
-PYTHONPATH=@abs_top_srcdir@/src/lib/python
+PYTHONPATH=@abs_top_builddir@/src/lib/python/isc/log_messages:@abs_top_builddir@/src/lib/python:@abs_top_builddir@/src/lib/dns/python/.libs:@abs_top_builddir@/src/lib/xfr/.libs:@abs_top_builddir@/src/lib/log/.libs:@abs_top_builddir@/src/lib/util/io/.libs:@abs_top_builddir@/src/lib/python/isc/config:@abs_top_builddir@/src/lib/python/isc/acl/.libs:@abs_top_builddir@/src/lib/python/isc/datasrc/.libs
export PYTHONPATH
+# If necessary (rare cases), explicitly specify paths to dynamic libraries
+# required by loadable python modules.
+SET_ENV_LIBRARY_PATH=@SET_ENV_LIBRARY_PATH@
+if test $SET_ENV_LIBRARY_PATH = yes; then
+ @ENV_LIBRARY_PATH@=@abs_top_builddir@/src/lib/dns/.libs:@abs_top_builddir@/src/lib/dns/python/.libs:@abs_top_builddir@/src/lib/cryptolink/.libs:@abs_top_builddir@/src/lib/cc/.libs:@abs_top_builddir@/src/lib/config/.libs:@abs_top_builddir@/src/lib/log/.libs:@abs_top_builddir@/src/lib/util/.libs:@abs_top_builddir@/src/lib/util/io/.libs:@abs_top_builddir@/src/lib/exceptions/.libs:@abs_top_builddir@/src/lib/datasrc/.libs:$@ENV_LIBRARY_PATH@
+ export @ENV_LIBRARY_PATH@
+fi
+
BIND10_MSGQ_SOCKET_FILE=@abs_top_builddir@/msgq_socket
export BIND10_MSGQ_SOCKET_FILE
diff --git a/src/bin/cmdctl/tests/Makefile.am b/src/bin/cmdctl/tests/Makefile.am
index e4ec9d4..89d89ea 100644
--- a/src/bin/cmdctl/tests/Makefile.am
+++ b/src/bin/cmdctl/tests/Makefile.am
@@ -6,7 +6,7 @@ EXTRA_DIST = $(PYTESTS)
# required by loadable python modules.
LIBRARY_PATH_PLACEHOLDER =
if SET_ENV_LIBRARY_PATH
-LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$$$(ENV_LIBRARY_PATH)
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/util/io/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
endif
# test using command-line arguments, so use check-local target instead of TESTS
@@ -19,7 +19,7 @@ endif
for pytest in $(PYTESTS) ; do \
echo Running test: $$pytest ; \
$(LIBRARY_PATH_PLACEHOLDER) \
- env PYTHONPATH=$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python:$(abs_top_builddir)/src/bin/cmdctl \
+ PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_builddir)/src/bin/cmdctl \
CMDCTL_SPEC_PATH=$(abs_top_builddir)/src/bin/cmdctl \
CMDCTL_SRC_PATH=$(abs_top_srcdir)/src/bin/cmdctl \
$(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
diff --git a/src/bin/dhcp6/Makefile.am b/src/bin/dhcp6/Makefile.am
index 824e8a8..805d6bb 100644
--- a/src/bin/dhcp6/Makefile.am
+++ b/src/bin/dhcp6/Makefile.am
@@ -19,7 +19,7 @@ pkglibexecdir = $(libexecdir)/@PACKAGE@
CLEANFILES = *.gcno *.gcda spec_config.h
man_MANS = b10-dhcp6.8
-EXTRA_DIST = $(man_MANS) dhcp6.spec
+EXTRA_DIST = $(man_MANS) dhcp6.spec interfaces.txt
#if ENABLE_MAN
#b10-dhcp6.8: b10-dhcp6.xml
@@ -31,8 +31,8 @@ spec_config.h: spec_config.h.pre
BUILT_SOURCES = spec_config.h
pkglibexec_PROGRAMS = b10-dhcp6
-b10_dhcp6_SOURCES = main.cc
-b10_dhcp6_SOURCES += dhcp6.h
+b10_dhcp6_SOURCES = main.cc iface_mgr.cc pkt6.cc dhcp6_srv.cc
+b10_dhcp6_SOURCES += iface_mgr.h pkt6.h dhcp6_srv.h dhcp6.h
b10_dhcp6_LDADD = $(top_builddir)/src/lib/datasrc/libdatasrc.la
b10_dhcp6_LDADD += $(top_builddir)/src/lib/dns/libdns++.la
b10_dhcp6_LDADD += $(top_builddir)/src/lib/util/libutil.la
@@ -49,5 +49,4 @@ b10_dhcp6_LDADD += $(SQLITE_LIBS)
# TODO: config.h.in is wrong because doesn't honor pkgdatadir
# and can't use @datadir@ because doesn't expand default ${prefix}
b10_dhcp6dir = $(pkgdatadir)
-b10_dhcp6_DATA = dhcp6.spec
-
+b10_dhcp6_DATA = dhcp6.spec interfaces.txt
diff --git a/src/bin/dhcp6/b10-dhcp6.8 b/src/bin/dhcp6/b10-dhcp6.8
index 14a5621..a05bf71 100644
--- a/src/bin/dhcp6/b10-dhcp6.8
+++ b/src/bin/dhcp6/b10-dhcp6.8
@@ -21,8 +21,8 @@
.SH "NAME"
b10-dhcp6 \- DHCPv6 daemon in BIND10 architecture
.SH "SYNOPSIS"
-.HP \w'\fBb10\-dhcp6\fR\ 'u
-\fBb10\-dhcp6\fR [\fB\-u\ \fR\fB\fIusername\fR\fR] [\fB\-v\fR]
+.HP \w'\fBb10\-dhcp6
+\fBb10\-dhcp6\fR [\fB\-v\fR]
.SH "DESCRIPTION"
.PP
The
diff --git a/src/bin/dhcp6/dhcp6.h b/src/bin/dhcp6/dhcp6.h
index 322b06c..b5512f3 100644
--- a/src/bin/dhcp6/dhcp6.h
+++ b/src/bin/dhcp6/dhcp6.h
@@ -1,29 +1,19 @@
-/* dhcp6.h
-
- DHCPv6 Protocol structures... */
-
-/*
- * Copyright (c) 2006-2011 by Internet Systems Consortium, Inc. ("ISC")
- *
- * Permission to use, copy, modify, and distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
- * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- *
- * Internet Systems Consortium, Inc.
- * 950 Charter Street
- * Redwood City, CA 94063
- * <info at isc.org>
- * https://www.isc.org/
- */
-
+// Copyright (C) 2006-2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef DHCP6_H
+#define DHCP6_H
/* DHCPv6 Option codes: */
@@ -136,8 +126,11 @@ extern const int dhcpv6_type_name_max;
/*
* DHCPv6 well-known multicast addressess, from section 5.1 of RFC 3315
*/
-#define All_DHCP_Relay_Agents_and_Servers "FF02::1:2"
-#define All_DHCP_Servers "FF05::1:3"
+#define ALL_DHCP_RELAY_AGENTS_AND_SERVERS "ff02::1:2"
+#define ALL_DHCP_SERVERS "ff05::1:3"
+
+#define DHCP6_CLIENT_PORT 546
+#define DHCP6_SERVER_PORT 547
/*
* DHCPv6 Retransmission Constants (RFC3315 section 5.5, RFC 5007)
@@ -171,29 +164,6 @@ extern const int dhcpv6_type_name_max;
#define LQ6_MAX_RT 10
#define LQ6_MAX_RC 5
-/*
- * Normal packet format, defined in section 6 of RFC 3315
- */
-struct dhcpv6_packet {
- unsigned char msg_type;
- unsigned char transaction_id[3];
- unsigned char options[FLEXIBLE_ARRAY_MEMBER];
-};
-
-/* Offset into DHCPV6 Reply packets where Options spaces commence. */
-#define REPLY_OPTIONS_INDEX 4
-
-/*
- * Relay packet format, defined in section 7 of RFC 3315
- */
-struct dhcpv6_relay_packet {
- unsigned char msg_type;
- unsigned char hop_count;
- unsigned char link_address[16];
- unsigned char peer_address[16];
- unsigned char options[FLEXIBLE_ARRAY_MEMBER];
-};
-
/* Leasequery query-types (RFC 5007) */
#define LQ6QT_BY_ADDRESS 1
@@ -211,3 +181,4 @@ struct dhcpv6_relay_packet {
#define IRT_DEFAULT 86400
#define IRT_MINIMUM 600
+#endif
diff --git a/src/bin/dhcp6/dhcp6_srv.cc b/src/bin/dhcp6/dhcp6_srv.cc
new file mode 100644
index 0000000..4d9244f
--- /dev/null
+++ b/src/bin/dhcp6/dhcp6_srv.cc
@@ -0,0 +1,55 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include "dhcp6/pkt6.h"
+#include "dhcp6/iface_mgr.h"
+#include "dhcp6/dhcp6_srv.h"
+
+using namespace std;
+using namespace isc;
+
+Dhcpv6Srv::Dhcpv6Srv() {
+ cout << "Initialization" << endl;
+
+ // first call to instance() will create IfaceMgr (it's a singleton)
+ // it may throw something if things go wrong
+ IfaceMgr::instance();
+}
+
+Dhcpv6Srv::~Dhcpv6Srv() {
+ cout << "DHCPv6 Srv shutdown." << endl;
+}
+
+bool
+Dhcpv6Srv::run() {
+    // Main server loop: echo every received DHCPv6 packet back to its
+    // sender. This never returns under normal operation (infinite loop);
+    // the trailing "return (true)" only satisfies the signature.
+    while (true) {
+        Pkt6* pkt;
+
+        pkt = IfaceMgr::instance().receive();
+
+        if (pkt) {
+            cout << "Received " << pkt->data_len_ << " bytes, echoing back."
+                 << endl;
+            IfaceMgr::instance().send(*pkt);
+            delete pkt;
+        }
+
+        // TODO add support for config session (see src/bin/auth/main.cc)
+        // so this daemon can be controlled from bob
+        // NOTE(review): receive() appears to block in recvmsg(), so this
+        // sleep adds up to 1s of extra latency per packet — confirm it is
+        // still wanted once the config session is integrated.
+        sleep(1);
+
+    }
+
+    return (true);
+}
diff --git a/src/bin/dhcp6/dhcp6_srv.h b/src/bin/dhcp6/dhcp6_srv.h
new file mode 100644
index 0000000..a02f5f6
--- /dev/null
+++ b/src/bin/dhcp6/dhcp6_srv.h
@@ -0,0 +1,40 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef DHCPV6_SRV_H
+#define DHCPV6_SRV_H
+
+#include <iostream>
+
+namespace isc {
+ class Dhcpv6Srv {
+ private:
+ // defined private on purpose. We don't want to have more than
+ // one copy
+ Dhcpv6Srv(const Dhcpv6Srv& src);
+ Dhcpv6Srv& operator=(const Dhcpv6Srv& src);
+
+ public:
+ // default constructor
+ Dhcpv6Srv();
+ ~Dhcpv6Srv();
+
+ bool run();
+
+ protected:
+ bool shutdown;
+ };
+};
+
+#endif // DHCPV6_SRV_H
diff --git a/src/bin/dhcp6/iface_mgr.cc b/src/bin/dhcp6/iface_mgr.cc
new file mode 100644
index 0000000..1e2551a
--- /dev/null
+++ b/src/bin/dhcp6/iface_mgr.cc
@@ -0,0 +1,581 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <sstream>
+#include <fstream>
+#include <string.h>
+#include <netinet/in.h>
+#include <arpa/inet.h>
+
+#include "dhcp6/iface_mgr.h"
+#include "dhcp6/dhcp6.h"
+#include "exceptions/exceptions.h"
+
+using namespace std;
+using namespace isc;
+using namespace isc::asiolink;
+
+namespace isc {
+
+// IfaceMgr is a singleton implementation
+IfaceMgr* IfaceMgr::instance_ = 0;
+
+void
+IfaceMgr::instanceCreate() {
+ if (instance_) {
+ // no need to do anything. Instance is already created.
+ // Who called it again anyway? Uh oh. Had to be us, as
+ // this is private method.
+ return;
+ }
+ instance_ = new IfaceMgr();
+}
+
+IfaceMgr&
+IfaceMgr::instance() {
+ if (instance_ == 0)
+ instanceCreate();
+ return (*instance_);
+}
+
+IfaceMgr::Iface::Iface(const std::string& name, int ifindex)
+ :name_(name), ifindex_(ifindex), mac_len_(0) {
+ memset(mac_, 0, 20);
+}
+
+std::string
+IfaceMgr::Iface::getFullName() const {
+ ostringstream tmp;
+ tmp << name_ << "/" << ifindex_;
+ return (tmp.str());
+}
+
+std::string
+IfaceMgr::Iface::getPlainMac() const {
+ ostringstream tmp;
+ for (int i=0; i<mac_len_; i++) {
+ tmp.fill('0');
+ tmp.width(2);
+ tmp << (hex) << (int) mac_[i];
+ if (i<mac_len_-1) {
+ tmp << ":";
+ }
+ }
+ return (tmp.str());
+}
+
+IfaceMgr::IfaceMgr() {
+
+ cout << "IfaceMgr initialization." << endl;
+
+ try {
+ // required for sending/receiving packets
+ // let's keep it in front, just in case someone
+ // wants to send anything during initialization
+ control_buf_len_ = CMSG_SPACE(sizeof(struct in6_pktinfo));
+ control_buf_ = new char[control_buf_len_];
+
+ detectIfaces();
+
+ if (!openSockets()) {
+ isc_throw(Unexpected, "Failed to open/bind sockets.");
+ }
+ } catch (const std::exception& ex) {
+ cout << "IfaceMgr creation failed:" << ex.what() << endl;
+
+ // TODO Uncomment this (or call LOG_FATAL) once
+ // interface detection is implemented. Otherwise
+ // it is not possible to run tests in a portable
+ // way (see detectIfaces() method).
+ // throw ex;
+ }
+}
+
+IfaceMgr::~IfaceMgr() {
+ if (control_buf_) {
+ delete [] control_buf_;
+ control_buf_ = 0;
+ control_buf_len_ = 0;
+ }
+}
+
+void
+IfaceMgr::detectIfaces() {
+ string ifaceName, linkLocal;
+
+ // TODO do the actual detection. Currently interface detection is faked
+ // by reading a text file.
+
+ cout << "Interface detection is not implemented yet. "
+ << "Reading interfaces.txt file instead." << endl;
+ cout << "Please use format: interface-name link-local-address" << endl;
+
+ try {
+ ifstream interfaces("interfaces.txt");
+
+ if (!interfaces.good()) {
+ cout << "Failed to read interfaces.txt file." << endl;
+ isc_throw(Unexpected, "Failed to read interfaces.txt");
+ }
+ interfaces >> ifaceName;
+ interfaces >> linkLocal;
+
+ cout << "Detected interface " << ifaceName << "/" << linkLocal << endl;
+
+ Iface iface(ifaceName, if_nametoindex( ifaceName.c_str() ) );
+ IOAddress addr(linkLocal);
+ iface.addrs_.push_back(addr);
+ ifaces_.push_back(iface);
+ interfaces.close();
+ } catch (const std::exception& ex) {
+ // TODO: deallocate whatever memory we used
+ // not that important, since this function is going to be
+ // thrown away as soon as we get proper interface detection
+ // implemented
+
+ // TODO Do LOG_FATAL here
+ std::cerr << "Interface detection failed." << std::endl;
+ throw ex;
+ }
+}
+
+bool
+IfaceMgr::openSockets() {
+ int sock;
+
+ for (IfaceLst::iterator iface=ifaces_.begin();
+ iface!=ifaces_.end();
+ ++iface) {
+
+ for (Addr6Lst::iterator addr=iface->addrs_.begin();
+ addr!=iface->addrs_.end();
+ ++addr) {
+
+ sock = openSocket(iface->name_, *addr,
+ DHCP6_SERVER_PORT);
+ if (sock<0) {
+ cout << "Failed to open unicast socket." << endl;
+ return (false);
+ }
+ sendsock_ = sock;
+
+ sock = openSocket(iface->name_,
+ IOAddress(ALL_DHCP_RELAY_AGENTS_AND_SERVERS),
+ DHCP6_SERVER_PORT);
+ if (sock<0) {
+ cout << "Failed to open multicast socket." << endl;
+ close(sendsock_);
+ return (false);
+ }
+ recvsock_ = sock;
+ }
+ }
+
+ return (true);
+}
+
+void
+IfaceMgr::printIfaces(std::ostream& out /*= std::cout*/) {
+ for (IfaceLst::const_iterator iface=ifaces_.begin();
+ iface!=ifaces_.end();
+ ++iface) {
+ out << "Detected interface " << iface->getFullName() << endl;
+ out << " " << iface->addrs_.size() << " addr(s):" << endl;
+ for (Addr6Lst::const_iterator addr=iface->addrs_.begin();
+ addr != iface->addrs_.end();
+ ++addr) {
+ out << " " << addr->toText() << endl;
+ }
+ out << " mac: " << iface->getPlainMac() << endl;
+ }
+}
+
+IfaceMgr::Iface*
+IfaceMgr::getIface(int ifindex) {
+ for (IfaceLst::iterator iface=ifaces_.begin();
+ iface!=ifaces_.end();
+ ++iface) {
+ if (iface->ifindex_ == ifindex)
+ return (&(*iface));
+ }
+
+ return (NULL); // not found
+}
+
+IfaceMgr::Iface*
+IfaceMgr::getIface(const std::string& ifname) {
+ for (IfaceLst::iterator iface=ifaces_.begin();
+ iface!=ifaces_.end();
+ ++iface) {
+ if (iface->name_ == ifname)
+ return (&(*iface));
+ }
+
+ return (NULL); // not found
+}
+
+
+/**
+ * Opens UDP/IPv6 socket and binds it to specific address, interface and port.
+ *
+ * If the requested address is multicast, the socket also joins the
+ * ALL_DHCP_RELAY_AGENTS_AND_SERVERS group on the given interface.
+ *
+ * @param ifname name of the interface
+ * @param addr address to be bound.
+ * @param port UDP port.
+ *
+ * @return socket descriptor, if socket creation, binding and multicast
+ * group join were all successful. -1 otherwise.
+ */
+int
+IfaceMgr::openSocket(const std::string& ifname,
+                     const IOAddress& addr,
+                     int port) {
+    struct sockaddr_in6 addr6;
+
+    cout << "Creating socket on " << ifname << "/" << addr.toText()
+         << "/port=" << port << endl;
+
+    memset(&addr6, 0, sizeof(addr6));
+    addr6.sin6_family = AF_INET6;
+    addr6.sin6_port = htons(port);
+    addr6.sin6_scope_id = if_nametoindex(ifname.c_str());
+
+    memcpy(&addr6.sin6_addr,
+           addr.getAddress().to_v6().to_bytes().data(),
+           sizeof(addr6.sin6_addr));
+#ifdef HAVE_SA_LEN
+    // addr6 is a local struct, not a pointer: member access must use '.'
+    // ('addr6->sin6_len' would fail to compile on BSD-style systems that
+    // define HAVE_SA_LEN).
+    addr6.sin6_len = sizeof(addr6);
+#endif
+
+    // TODO: use sockcreator once it becomes available
+
+    // make a socket
+    int sock = socket(AF_INET6, SOCK_DGRAM, 0);
+    if (sock < 0) {
+        cout << "Failed to create UDP6 socket." << endl;
+        return (-1);
+    }
+
+    /* Set the REUSEADDR option so that we don't fail to start if
+       we're being restarted. */
+    int flag = 1;
+    if (setsockopt(sock, SOL_SOCKET, SO_REUSEADDR,
+                   (char *)&flag, sizeof(flag)) < 0) {
+        cout << "Can't set SO_REUSEADDR option on dhcpv6 socket." << endl;
+        close(sock);
+        return (-1);
+    }
+
+    if (bind(sock, (struct sockaddr *)&addr6, sizeof(addr6)) < 0) {
+        cout << "Failed to bind socket " << sock << " to " << addr.toText()
+             << "/port=" << port << endl;
+        close(sock);
+        return (-1);
+    }
+#ifdef IPV6_RECVPKTINFO
+    /* RFC3542 - a new way */
+    if (setsockopt(sock, IPPROTO_IPV6, IPV6_RECVPKTINFO,
+                   &flag, sizeof(flag)) != 0) {
+        cout << "setsockopt: IPV6_RECVPKTINFO failed." << endl;
+        close(sock);
+        return (-1);
+    }
+#else
+    /* RFC2292 - an old way */
+    if (setsockopt(sock, IPPROTO_IPV6, IPV6_PKTINFO,
+                   &flag, sizeof(flag)) != 0) {
+        cout << "setsockopt: IPV6_PKTINFO: failed." << endl;
+        close(sock);
+        return (-1);
+    }
+#endif
+
+    // multicast stuff
+
+    if (addr.getAddress().to_v6().is_multicast()) {
+        // both mcast (ALL_DHCP_RELAY_AGENTS_AND_SERVERS and ALL_DHCP_SERVERS)
+        // are link and site-scoped, so there is no sense to join those groups
+        // with global addresses.
+
+        if ( !joinMcast( sock, ifname,
+                         string(ALL_DHCP_RELAY_AGENTS_AND_SERVERS) ) ) {
+            close(sock);
+            return (-1);
+        }
+    }
+
+    cout << "Created socket " << sock << " on " << ifname << "/" <<
+        addr.toText() << "/port=" << port << endl;
+
+    return (sock);
+}
+
+/**
+ * joins multicast group
+ *
+ * @param sock socket file descriptor
+ * @param ifname name of the interface (DHCPv6 uses link-scoped mc groups)
+ * @param mcast multicast address to join (string)
+ *
+ * @return true if joined successfully, false otherwise
+ */
+bool
+IfaceMgr::joinMcast(int sock, const std::string& ifname,
+                    const std::string & mcast) {
+
+    struct ipv6_mreq mreq;
+
+    if (inet_pton(AF_INET6, mcast.c_str(),
+                  &mreq.ipv6mr_multiaddr) <= 0) {
+        // Report the address that failed to parse (mcast); the previous
+        // message printed the interface name instead.
+        cout << "Failed to convert " << mcast
+             << " to IPv6 multicast address." << endl;
+        return (false);
+    }
+
+    mreq.ipv6mr_interface = if_nametoindex(ifname.c_str());
+    if (setsockopt(sock, IPPROTO_IPV6, IPV6_JOIN_GROUP,
+                   &mreq, sizeof(mreq)) < 0) {
+        cout << "Failed to join " << mcast << " multicast group." << endl;
+        return (false);
+    }
+
+    cout << "Joined multicast " << mcast << " group." << endl;
+
+    return (true);
+}
+
+/**
+ * Sends UDP packet over IPv6.
+ *
+ * All parameters for actual transmission are specified in
+ * Pkt6 structure itself. That includes destination address,
+ * src/dst port and interface over which data will be sent.
+ *
+ * @param pkt A packet object that is going to be sent.
+ *
+ * @return True, if transmission was successful. False otherwise.
+ */
+bool
+IfaceMgr::send(Pkt6 &pkt) {
+    struct msghdr m;
+    struct iovec v;
+    int result;
+    struct in6_pktinfo *pktinfo;
+    struct cmsghdr *cmsg;
+    memset(control_buf_, 0, control_buf_len_);
+
+    /*
+     * Initialize our message header structure.
+     */
+    memset(&m, 0, sizeof(m));
+
+    /*
+     * Set the target address we're sending to.
+     */
+    sockaddr_in6 to;
+    memset(&to, 0, sizeof(to));
+    to.sin6_family = AF_INET6;
+    to.sin6_port = htons(pkt.remote_port_);
+    memcpy(&to.sin6_addr,
+           pkt.remote_addr_.getAddress().to_v6().to_bytes().data(),
+           16);
+    to.sin6_scope_id = pkt.ifindex_;
+
+    m.msg_name = &to;
+    m.msg_namelen = sizeof(to);
+
+    /*
+     * Set the data buffer we're sending. (Using this wacky
+     * "scatter-gather" stuff... we only have a single chunk
+     * of data to send, so we declare a single vector entry.)
+     */
+    v.iov_base = (char *) &pkt.data_[0];
+    v.iov_len = pkt.data_len_;
+    m.msg_iov = &v;
+    m.msg_iovlen = 1;
+
+    /*
+     * Setting the interface is a bit more involved.
+     *
+     * We have to create a "control message", and set that to
+     * define the IPv6 packet information. We could set the
+     * source address if we wanted, but we can safely let the
+     * kernel decide what that should be.
+     */
+    m.msg_control = control_buf_;
+    m.msg_controllen = control_buf_len_;
+    cmsg = CMSG_FIRSTHDR(&m);
+    cmsg->cmsg_level = IPPROTO_IPV6;
+    cmsg->cmsg_type = IPV6_PKTINFO;
+    cmsg->cmsg_len = CMSG_LEN(sizeof(*pktinfo));
+    pktinfo = (struct in6_pktinfo *)CMSG_DATA(cmsg);
+    memset(pktinfo, 0, sizeof(*pktinfo));
+    pktinfo->ipi6_ifindex = pkt.ifindex_;
+    m.msg_controllen = cmsg->cmsg_len;
+
+    result = sendmsg(sendsock_, &m, 0);
+    if (result < 0) {
+        // sendmsg() failed: report failure. The previous code fell
+        // through to "return (result)", which converted -1 to true and
+        // reported a failed transmission as success.
+        cout << "Send packet failed." << endl;
+        return (false);
+    }
+    cout << "Sent " << result << " bytes." << endl;
+
+    cout << "Sent " << pkt.data_len_ << " bytes over "
+         << pkt.iface_ << "/" << pkt.ifindex_ << " interface: "
+         << " dst=" << pkt.remote_addr_.toText()
+         << ", src=" << pkt.local_addr_.toText()
+         << endl;
+
+    return (true);
+}
+
+
+/**
+ * Attempts to receive UDP/IPv6 packet over open sockets.
+ *
+ * TODO Start using select() and add timeout to be able
+ * to not wait infinitely, but rather do something useful
+ * (e.g. remove expired leases)
+ *
+ * @return Object representing received packet.
+ */
+Pkt6*
+IfaceMgr::receive() {
+ struct msghdr m;
+ struct iovec v;
+ int result;
+ struct cmsghdr* cmsg;
+ struct in6_pktinfo* pktinfo;
+ struct sockaddr_in6 from;
+ struct in6_addr to_addr;
+ Pkt6* pkt;
+ char addr_str[INET6_ADDRSTRLEN];
+
+ try {
+ // RFC3315 states that server responses may be
+ // fragmented if they are over MTU. There is no
+ // text whether client's packets may be larger
+ // than 1500. Nevertheless to be on the safe side
+ // we use larger buffer. This buffer limit is checked
+ // during reception (see iov_len below), so we are
+ // safe
+ pkt = new Pkt6(65536);
+ } catch (const std::exception& ex) {
+ cout << "Failed to create new packet." << endl;
+ return (0);
+ }
+
+ memset(control_buf_, 0, control_buf_len_);
+
+ memset(&from, 0, sizeof(from));
+ memset(&to_addr, 0, sizeof(to_addr));
+
+ /*
+ * Initialize our message header structure.
+ */
+ memset(&m, 0, sizeof(m));
+
+ /*
+ * Point so we can get the from address.
+ */
+ m.msg_name = &from;
+ m.msg_namelen = sizeof(from);
+
+ /*
+ * Set the data buffer we're receiving. (Using this wacky
+ * "scatter-gather" stuff... but that doesn't really make
+ * sense for us, so we use a single vector entry.)
+ */
+ v.iov_base = (void*)&pkt->data_[0];
+ v.iov_len = pkt->data_len_;
+ m.msg_iov = &v;
+ m.msg_iovlen = 1;
+
+ /*
+ * Getting the interface is a bit more involved.
+ *
+ * We set up some space for a "control message". We have
+ * previously asked the kernel to give us packet
+ * information (when we initialized the interface), so we
+ * should get the destination address from that.
+ */
+ m.msg_control = control_buf_;
+ m.msg_controllen = control_buf_len_;
+
+ result = recvmsg(recvsock_, &m, 0);
+
+ if (result >= 0) {
+ /*
+ * If we did read successfully, then we need to loop
+ * through the control messages we received and
+ * find the one with our destination address.
+ *
+ * We also keep a flag to see if we found it. If we
+ * didn't, then we consider this to be an error.
+ */
+ int found_pktinfo = 0;
+ cmsg = CMSG_FIRSTHDR(&m);
+ while (cmsg != NULL) {
+ if ((cmsg->cmsg_level == IPPROTO_IPV6) &&
+ (cmsg->cmsg_type == IPV6_PKTINFO)) {
+ pktinfo = (struct in6_pktinfo*)CMSG_DATA(cmsg);
+ to_addr = pktinfo->ipi6_addr;
+ pkt->ifindex_ = pktinfo->ipi6_ifindex;
+ found_pktinfo = 1;
+ }
+ cmsg = CMSG_NXTHDR(&m, cmsg);
+ }
+ if (!found_pktinfo) {
+ cout << "Unable to find pktinfo" << endl;
+ delete pkt;
+ return (0);
+ }
+ } else {
+ cout << "Failed to receive data." << endl;
+ delete pkt;
+ return (0);
+ }
+
+ // That's ugly.
+ // TODO add IOAddress constructor that will take struct in6_addr*
+ inet_ntop(AF_INET6, &to_addr, addr_str,INET6_ADDRSTRLEN);
+ pkt->local_addr_ = IOAddress(string(addr_str));
+
+ inet_ntop(AF_INET6, &from.sin6_addr, addr_str, INET6_ADDRSTRLEN);
+ pkt->remote_addr_ = IOAddress(string(addr_str));
+
+ pkt->remote_port_ = ntohs(from.sin6_port);
+
+ Iface* received = getIface(pkt->ifindex_);
+ if (received) {
+ pkt->iface_ = received->name_;
+ } else {
+ cout << "Received packet over unknown interface (ifindex="
+ << pkt->ifindex_ << ")." << endl;
+ delete pkt;
+ return (0);
+ }
+
+ pkt->data_len_ = result;
+
+ // TODO Move this to LOG_DEBUG
+ cout << "Received " << pkt->data_len_ << " bytes over "
+ << pkt->iface_ << "/" << pkt->ifindex_ << " interface: "
+ << " src=" << pkt->remote_addr_.toText()
+ << ", dst=" << pkt->local_addr_.toText()
+ << endl;
+
+ return (pkt);
+}
+
+}
diff --git a/src/bin/dhcp6/iface_mgr.h b/src/bin/dhcp6/iface_mgr.h
new file mode 100644
index 0000000..39061da
--- /dev/null
+++ b/src/bin/dhcp6/iface_mgr.h
@@ -0,0 +1,103 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef IFACE_MGR_H
+#define IFACE_MGR_H
+
+#include <list>
+#include "io_address.h"
+#include "dhcp6/pkt6.h"
+
+namespace isc {
+
+ /**
+ * IfaceMgr is an interface manager class that detects available network
+ * interfaces, configured addresses, link-local addresses, and provides
+ * API for using sockets.
+ *
+ */
+ class IfaceMgr {
+ public:
+ typedef std::list<isc::asiolink::IOAddress> Addr6Lst;
+ struct Iface { // TODO: could be a class as well
+ std::string name_; // network interface name
+        int ifindex_; // interface index (a value that uniquely identifies
+                      // an interface)
+ Addr6Lst addrs_;
+        char mac_[20]; // InfiniBand uses 20-byte identifiers
+ int mac_len_;
+
+ Iface(const std::string& name, int ifindex);
+ std::string getFullName() const;
+ std::string getPlainMac() const;
+
+        int sendsock_; // socket used for sending data
+ int recvsock_; // socket used for receiving data
+
+        // next field is not needed, let's keep it in containers
+ };
+
+ // TODO performance improvement: we may change this into
+ // 2 maps (ifindex-indexed and name-indexed) and
+ // also hide it (make it public make tests easier for now)
+ typedef std::list<Iface> IfaceLst;
+
+ static IfaceMgr& instance();
+
+ Iface * getIface(int ifindex);
+ Iface * getIface(const std::string& ifname);
+
+ void printIfaces(std::ostream& out = std::cout);
+
+ bool send(Pkt6& pkt);
+ Pkt6* receive();
+
+ // don't use private, we need derived classes in tests
+ protected:
+ IfaceMgr(); // don't create IfaceMgr directly, use instance() method
+ ~IfaceMgr();
+
+ void detectIfaces();
+
+ int openSocket(const std::string& ifname,
+ const isc::asiolink::IOAddress& addr,
+ int port);
+
+ // TODO: having 2 maps (ifindex->iface and ifname->iface would)
+ // probably be better for performance reasons
+ IfaceLst ifaces_;
+
+ static IfaceMgr * instance_;
+
+ // TODO: Also keep this interface on Iface once interface detection
+ // is implemented. We may need it e.g. to close all sockets on
+ // specific interface
+ int recvsock_; // TODO: should be fd_set eventually, but we have only
+ int sendsock_; // 2 sockets for now. Will do for until next release
+ // we can't use the same socket, as receiving socket
+ // is bound to multicast address. And we all know what happens
+ // to people who try to use multicast as source address.
+
+ char * control_buf_;
+ int control_buf_len_;
+
+ private:
+ bool openSockets();
+ static void instanceCreate();
+ bool joinMcast(int sock, const std::string& ifname,
+ const std::string& mcast);
+ };
+};
+
+#endif
diff --git a/src/bin/dhcp6/interfaces.txt b/src/bin/dhcp6/interfaces.txt
new file mode 100644
index 0000000..6a64309
--- /dev/null
+++ b/src/bin/dhcp6/interfaces.txt
@@ -0,0 +1,10 @@
+eth0 fe80::21e:8cff:fe9b:7349
+
+#
+# only first line is read.
+# please use following format:
+# interface-name link-local-ipv6-address
+#
+# This file will become obsolete once proper interface detection
+# is implemented.
+#
diff --git a/src/bin/dhcp6/main.cc b/src/bin/dhcp6/main.cc
index 75af3d9..95d2261 100644
--- a/src/bin/dhcp6/main.cc
+++ b/src/bin/dhcp6/main.cc
@@ -33,7 +33,7 @@
#include <log/dummylog.h>
#include <dhcp6/spec_config.h>
-
+#include "dhcp6/dhcp6_srv.h"
using namespace std;
using namespace isc::util;
@@ -42,15 +42,16 @@ using namespace isc::cc;
using namespace isc::config;
using namespace isc::util;
+using namespace isc;
+
namespace {
bool verbose_mode = false;
void
usage() {
- cerr << "Usage: b10-dhcp6 [-u user] [-v]"
+ cerr << "Usage: b10-dhcp6 [-v]"
<< endl;
- cerr << "\t-u: change process UID to the specified user" << endl;
cerr << "\t-v: verbose output" << endl;
exit(1);
}
@@ -59,40 +60,32 @@ usage() {
int
main(int argc, char* argv[]) {
int ch;
- const char* uid = NULL;
- bool cache = true;
- while ((ch = getopt(argc, argv, ":nu:v")) != -1) {
+ while ((ch = getopt(argc, argv, ":v")) != -1) {
switch (ch) {
- case 'n':
- cache = false;
- break;
- case 'u':
- uid = optarg;
- break;
case 'v':
verbose_mode = true;
isc::log::denabled = true;
break;
- case '?':
+ case ':':
default:
usage();
}
}
+ cout << "My pid=" << getpid() << endl;
+
if (argc - optind > 0) {
usage();
}
int ret = 0;
- // XXX: we should eventually pass io_service here.
+ // TODO remainder of auth to dhcp6 code copy. We need to enable this in
+ // dhcp6 eventually
#if 0
Session* cc_session = NULL;
- Session* xfrin_session = NULL;
Session* statistics_session = NULL;
- bool xfrin_session_established = false; // XXX (see Trac #287)
- bool statistics_session_established = false; // XXX (see Trac #287)
ModuleCCSession* config_session = NULL;
#endif
try {
@@ -108,15 +101,14 @@ main(int argc, char* argv[]) {
// auth_server->setVerbose(verbose_mode);
cout << "[b10-dhcp6] Initiating DHCPv6 operation." << endl;
+ Dhcpv6Srv* srv = new Dhcpv6Srv();
+
+ srv->run();
+
} catch (const std::exception& ex) {
cerr << "[b10-dhcp6] Server failed: " << ex.what() << endl;
ret = 1;
}
- while (true) {
- sleep(10);
- cout << "[b10-dhcp6] I'm alive." << endl;
- }
-
return (ret);
}
diff --git a/src/bin/dhcp6/pkt6.cc b/src/bin/dhcp6/pkt6.cc
new file mode 100644
index 0000000..5dcab86
--- /dev/null
+++ b/src/bin/dhcp6/pkt6.cc
@@ -0,0 +1,46 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+
+#include "dhcp6/dhcp6.h"
+#include "dhcp6/pkt6.h"
+#include <iostream>
+
+namespace isc {
+
+///
+/// constructor
+///
+/// \param dataLen - length of the data to be allocated
+///
+Pkt6::Pkt6(int dataLen)
+ :local_addr_("::"),
+ remote_addr_("::") {
+ try {
+ data_ = boost::shared_array<char>(new char[dataLen]);
+ data_len_ = dataLen;
+ } catch (const std::exception& ex) {
+ // TODO move to LOG_FATAL()
+ // let's continue with empty pkt for now
+ std::cout << "Failed to allocate " << dataLen << " bytes."
+ << std::endl;
+ data_len_ = 0;
+ }
+}
+
+Pkt6::~Pkt6() {
+    // no need to delete anything, shared_array will take care of data_
+}
+
+};
diff --git a/src/bin/dhcp6/pkt6.h b/src/bin/dhcp6/pkt6.h
new file mode 100644
index 0000000..9a14d92
--- /dev/null
+++ b/src/bin/dhcp6/pkt6.h
@@ -0,0 +1,62 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef PKT6_H
+#define PKT6_H
+
+#include <iostream>
+#include <boost/shared_array.hpp>
+#include "io_address.h"
+
+namespace isc {
+
+ class Pkt6 {
+ public:
+ Pkt6(int len);
+ ~Pkt6();
+
+ // XXX: probably need getter/setter wrappers
+ // and hide fields as protected
+ // buffer that holds memory. It is shared_array as options may
+ // share pointer to this buffer
+ boost::shared_array<char> data_;
+
+ // length of the data
+ int data_len_;
+
+ // local address (destination if receiving packet, source if sending packet)
+ isc::asiolink::IOAddress local_addr_;
+
+ // remote address (source if receiving packet, destination if sending packet)
+ isc::asiolink::IOAddress remote_addr_;
+
+ // name of the network interface the packet was received/to be sent over
+ std::string iface_;
+
+ // interface index (each network interface has assigned unique ifindex
+        // it is functional equivalent of name, but sometimes more useful, e.g.
+ // when using crazy systems that allow spaces in interface names (Windows)
+ int ifindex_;
+
+        // local TCP or UDP port
+ int local_port_;
+
+ // remote TCP or UDP port
+ int remote_port_;
+
+ // XXX: add *a lot* here
+ };
+}
+
+#endif
diff --git a/src/bin/dhcp6/tests/Makefile.am b/src/bin/dhcp6/tests/Makefile.am
index 4a0e918..ae9d8e3 100644
--- a/src/bin/dhcp6/tests/Makefile.am
+++ b/src/bin/dhcp6/tests/Makefile.am
@@ -8,15 +8,60 @@ EXTRA_DIST = $(PYTESTS)
# required by loadable python modules.
LIBRARY_PATH_PLACEHOLDER =
if SET_ENV_LIBRARY_PATH
-LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/util/io/.libs:$$$(ENV_LIBRARY_PATH)
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/util/io/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
endif
# test using command-line arguments, so use check-local target instead of TESTS
check-local:
for pytest in $(PYTESTS) ; do \
echo Running test: $$pytest ; \
- env PYTHONPATH=$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python:$(abs_top_srcdir)/src/bin:$(abs_top_builddir)/src/bin/bind10:$(abs_top_builddir)/src/lib/util/io/.libs \
+ PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_srcdir)/src/bin:$(abs_top_builddir)/src/bin/bind10:$(abs_top_builddir)/src/lib/util/io/.libs \
$(LIBRARY_PATH_PLACEHOLDER) \
BIND10_MSGQ_SOCKET_FILE=$(abs_top_builddir)/msgq_socket \
$(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
done
+
+
+AM_CPPFLAGS = -I$(top_srcdir)/src/lib -I$(top_builddir)/src/lib
+AM_CPPFLAGS += -I$(top_builddir)/src/bin # for generated spec_config.h header
+AM_CPPFLAGS += -I$(top_srcdir)/src/bin
+AM_CPPFLAGS += -I$(top_builddir)/src/lib/cc
+AM_CPPFLAGS += -I$(top_srcdir)/src/lib/asiolink
+AM_CPPFLAGS += $(BOOST_INCLUDES)
+AM_CPPFLAGS += -DTEST_DATA_DIR=\"$(abs_top_srcdir)/src/lib/testutils/testdata\"
+AM_CPPFLAGS += -DTEST_DATA_BUILDDIR=\"$(abs_top_builddir)/src/bin/dhcp6/tests\"
+AM_CPPFLAGS += -DINSTALL_PROG=\"$(abs_top_srcdir)/install-sh\"
+
+CLEANFILES = $(builddir)/interfaces.txt
+
+AM_CXXFLAGS = $(B10_CXXFLAGS)
+
+if USE_STATIC_LINK
+AM_LDFLAGS = -static
+endif
+
+TESTS =
+if HAVE_GTEST
+
+TESTS += dhcp6_unittests
+
+dhcp6_unittests_SOURCES = ../pkt6.h ../pkt6.cc
+dhcp6_unittests_SOURCES += ../iface_mgr.h ../iface_mgr.cc
+dhcp6_unittests_SOURCES += ../dhcp6_srv.h ../dhcp6_srv.cc
+dhcp6_unittests_SOURCES += dhcp6_unittests.cc
+dhcp6_unittests_SOURCES += pkt6_unittest.cc
+dhcp6_unittests_SOURCES += iface_mgr_unittest.cc
+dhcp6_unittests_SOURCES += dhcp6_srv_unittest.cc
+
+dhcp6_unittests_CPPFLAGS = $(AM_CPPFLAGS) $(GTEST_INCLUDES)
+dhcp6_unittests_LDFLAGS = $(AM_LDFLAGS) $(GTEST_LDFLAGS)
+dhcp6_unittests_LDADD = $(GTEST_LDADD)
+dhcp6_unittests_LDADD += $(SQLITE_LIBS)
+dhcp6_unittests_LDADD += $(top_builddir)/src/lib/asiolink/libasiolink.la
+dhcp6_unittests_LDADD += $(top_builddir)/src/lib/config/libcfgclient.la
+dhcp6_unittests_LDADD += $(top_builddir)/src/lib/cc/libcc.la
+dhcp6_unittests_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
+dhcp6_unittests_LDADD += $(top_builddir)/src/lib/log/liblog.la
+endif
+
+noinst_PROGRAMS = $(TESTS)
diff --git a/src/bin/dhcp6/tests/dhcp6_srv_unittest.cc b/src/bin/dhcp6/tests/dhcp6_srv_unittest.cc
new file mode 100644
index 0000000..96c767e
--- /dev/null
+++ b/src/bin/dhcp6/tests/dhcp6_srv_unittest.cc
@@ -0,0 +1,53 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <config.h>
+#include <iostream>
+#include <sstream>
+
+#include <arpa/inet.h>
+#include <gtest/gtest.h>
+
+
+#include "dhcp6/dhcp6_srv.h"
+
+using namespace std;
+using namespace isc;
+
+namespace {
+class Dhcpv6SrvTest : public ::testing::Test {
+public:
+ Dhcpv6SrvTest() {
+ }
+};
+
+TEST_F(Dhcpv6SrvTest, basic) {
+ // there's almost no code now. What's there provides echo capability
+ // that is just a proof of concept and will be removed soon
+ // No need to thoroughly test it
+
+ // srv has stubbed interface detection. It will read
+ // interfaces.txt instead. It will pretend to have detected
+ // fe80::1234 link-local address on eth0 interface. Obviously
+
+ // an attempt to bind this socket will fail.
+ EXPECT_NO_THROW( {
+ Dhcpv6Srv * srv = new Dhcpv6Srv();
+
+ delete srv;
+ });
+
+}
+
+}
diff --git a/src/bin/dhcp6/tests/dhcp6_unittests.cc b/src/bin/dhcp6/tests/dhcp6_unittests.cc
new file mode 100644
index 0000000..360fb71
--- /dev/null
+++ b/src/bin/dhcp6/tests/dhcp6_unittests.cc
@@ -0,0 +1,28 @@
+// Copyright (C) 2009 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <stdio.h>
+#include <gtest/gtest.h>
+#include <log/logger_support.h>
+
+int
+main(int argc, char* argv[]) {
+
+ ::testing::InitGoogleTest(&argc, argv);
+ isc::log::initLogger();
+
+ int result = RUN_ALL_TESTS();
+
+ return result;
+}
diff --git a/src/bin/dhcp6/tests/iface_mgr_unittest.cc b/src/bin/dhcp6/tests/iface_mgr_unittest.cc
new file mode 100644
index 0000000..a726511
--- /dev/null
+++ b/src/bin/dhcp6/tests/iface_mgr_unittest.cc
@@ -0,0 +1,262 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <config.h>
+#include <iostream>
+#include <fstream>
+#include <sstream>
+
+#include <arpa/inet.h>
+#include <gtest/gtest.h>
+
+#include "io_address.h"
+#include "dhcp6/pkt6.h"
+#include "dhcp6/iface_mgr.h"
+
+using namespace std;
+using namespace isc;
+using namespace isc::asiolink;
+
+namespace {
+const char* const INTERFACE_FILE = TEST_DATA_BUILDDIR "/interfaces.txt";
+
+class NakedIfaceMgr: public IfaceMgr {
+ // "naked" Interface Manager, exposes internal fields
+public:
+ NakedIfaceMgr() { }
+ IfaceLst & getIfacesLst() { return ifaces_; }
+ void setSendSock(int sock) { sendsock_ = sock; }
+ void setRecvSock(int sock) { recvsock_ = sock; }
+
+ int openSocket(const std::string& ifname,
+ const isc::asiolink::IOAddress& addr,
+ int port) {
+ return IfaceMgr::openSocket(ifname, addr, port);
+ }
+
+};
+
+// dummy class for now, but this will be expanded when needed
+class IfaceMgrTest : public ::testing::Test {
+public:
+ IfaceMgrTest() {
+ }
+};
+
+TEST_F(IfaceMgrTest, basic) {
+ // checks that IfaceManager can be instantiated
+
+ IfaceMgr & ifacemgr = IfaceMgr::instance();
+ ASSERT_TRUE(&ifacemgr != 0);
+}
+
+TEST_F(IfaceMgrTest, ifaceClass) {
+ // basic tests for Iface inner class
+
+ IfaceMgr::Iface * iface = new IfaceMgr::Iface("eth5", 7);
+
+ EXPECT_STREQ("eth5/7", iface->getFullName().c_str());
+
+ delete iface;
+
+}
+
+// TODO: Implement getPlainMac() test as soon as interface detection is implemented.
+
+TEST_F(IfaceMgrTest, getIface) {
+
+ cout << "Interface checks. Please ignore socket binding errors." << endl;
+ NakedIfaceMgr * ifacemgr = new NakedIfaceMgr();
+
+ // interface name, ifindex
+ IfaceMgr::Iface iface1("lo", 1);
+ IfaceMgr::Iface iface2("eth5", 2);
+ IfaceMgr::Iface iface3("en3", 5);
+ IfaceMgr::Iface iface4("e1000g0", 3);
+
+ ifacemgr->getIfacesLst().push_back(iface1);
+ ifacemgr->getIfacesLst().push_back(iface2);
+ ifacemgr->getIfacesLst().push_back(iface3);
+ ifacemgr->getIfacesLst().push_back(iface4);
+
+ // check that interface can be retrieved by ifindex
+ IfaceMgr::Iface * tmp = ifacemgr->getIface(5);
+ // ASSERT_NE(NULL, tmp); is not supported. hmmmm.
+ ASSERT_TRUE( tmp != NULL );
+
+ EXPECT_STREQ( "en3", tmp->name_.c_str() );
+ EXPECT_EQ(5, tmp->ifindex_);
+
+ // check that interface can be retrieved by name
+ tmp = ifacemgr->getIface("lo");
+ ASSERT_TRUE( tmp != NULL );
+
+ EXPECT_STREQ( "lo", tmp->name_.c_str() );
+ EXPECT_EQ(1, tmp->ifindex_);
+
+ // check that non-existing interfaces are not returned
+ EXPECT_EQ(0, ifacemgr->getIface("wifi0") );
+
+ delete ifacemgr;
+}
+
+TEST_F(IfaceMgrTest, detectIfaces) {
+
+ // test detects that interfaces can be detected
+ // there is no code for that now, but interfaces are
+ // read from file
+ fstream fakeifaces(INTERFACE_FILE, ios::out|ios::trunc);
+ fakeifaces << "eth0 fe80::1234";
+ fakeifaces.close();
+
+ // this is not usable on systems that don't have eth0
+ // interfaces. Nevertheless, this fake interface should
+ // be on list, but if_nametoindex() will fail.
+
+ NakedIfaceMgr * ifacemgr = new NakedIfaceMgr();
+
+ ASSERT_TRUE( ifacemgr->getIface("eth0") != NULL );
+
+ IfaceMgr::Iface * eth0 = ifacemgr->getIface("eth0");
+
+ // there should be one address
+ EXPECT_EQ(1, eth0->addrs_.size());
+
+ IOAddress * addr = &(*eth0->addrs_.begin());
+ ASSERT_TRUE( addr != NULL );
+
+ EXPECT_STREQ( "fe80::1234", addr->toText().c_str() );
+
+ delete ifacemgr;
+}
+
+// TODO: disabled due to other naming on various systems
+// (lo in Linux, lo0 in BSD systems)
+// Fix for this is available on 1186 branch, will reenable
+// this test once 1186 is merged
+TEST_F(IfaceMgrTest, DISABLED_sockets) {
+ // testing socket operation in a portable way is tricky
+ // without interface detection implemented
+
+ NakedIfaceMgr * ifacemgr = new NakedIfaceMgr();
+
+ IOAddress loAddr("::1");
+
+ // bind multicast socket to port 10547
+ int socket1 = ifacemgr->openSocket("lo", loAddr, 10547);
+ EXPECT_GT(socket1, 0); // socket > 0
+
+ // bind unicast socket to port 10548
+ int socket2 = ifacemgr->openSocket("lo", loAddr, 10548);
+ EXPECT_GT(socket2, 0);
+
+ // expect success. This address/port is already bound, but
+ // we are using SO_REUSEADDR, so we can bind it twice
+ int socket3 = ifacemgr->openSocket("lo", loAddr, 10547);
+ EXPECT_GT(socket3, 0); // socket > 0
+
+ // we now have 3 sockets open at the same time. Looks good.
+
+ close(socket1);
+ close(socket2);
+ close(socket3);
+
+ delete ifacemgr;
+}
+
+TEST_F(IfaceMgrTest, socketsMcast) {
+ // testing socket operation in a portable way is tricky
+ // without interface detection implemented
+
+ NakedIfaceMgr * ifacemgr = new NakedIfaceMgr();
+
+ IOAddress loAddr("::1");
+ IOAddress mcastAddr("ff02::1:2");
+
+ // bind multicast socket to port 10547
+ int socket1 = ifacemgr->openSocket("lo", mcastAddr, 10547);
+ EXPECT_GT(socket1, 0); // socket > 0
+
+ // expect success. This address/port is already bound, but
+ // we are using SO_REUSEADDR, so we can bind it twice
+ int socket2 = ifacemgr->openSocket("lo", mcastAddr, 10547);
+ EXPECT_GT(socket2, 0);
+
+ // there's no good way to test negative case here.
+ // we would need non-multicast interface. We will be able
+ // to iterate thru available interfaces and check if there
+ // are interfaces without multicast-capable flag.
+
+ close(socket1);
+ close(socket2);
+
+ delete ifacemgr;
+}
+
+// TODO: disabled due to other naming on various systems
+// (lo in Linux, lo0 in BSD systems)
+// Fix for this is available on 1186 branch, will reenable
+// this test once 1186 is merged
+TEST_F(IfaceMgrTest, DISABLED_sendReceive) {
+ // testing socket operation in a portable way is tricky
+ // without interface detection implemented
+
+ fstream fakeifaces(INTERFACE_FILE, ios::out|ios::trunc);
+ fakeifaces << "lo ::1";
+ fakeifaces.close();
+
+ NakedIfaceMgr * ifacemgr = new NakedIfaceMgr();
+
+ // let's assume that every supported OS have lo interface
+ IOAddress loAddr("::1");
+ int socket1 = ifacemgr->openSocket("lo", loAddr, 10547);
+ int socket2 = ifacemgr->openSocket("lo", loAddr, 10546);
+
+ ifacemgr->setSendSock(socket2);
+ ifacemgr->setRecvSock(socket1);
+
+ Pkt6 sendPkt(128);
+
+ // prepare dummy payload
+ for (int i=0;i<128; i++) {
+ sendPkt.data_[i] = i;
+ }
+
+ sendPkt.remote_port_ = 10547;
+ sendPkt.remote_addr_ = IOAddress("::1");
+ sendPkt.ifindex_ = 1;
+ sendPkt.iface_ = "lo";
+
+ Pkt6 * rcvPkt;
+
+ EXPECT_EQ(true, ifacemgr->send(sendPkt));
+
+ rcvPkt = ifacemgr->receive();
+
+ ASSERT_TRUE( rcvPkt != NULL ); // received our own packet
+
+ // let's check that we received what was sent
+ EXPECT_EQ(sendPkt.data_len_, rcvPkt->data_len_);
+ EXPECT_EQ(0, memcmp(&sendPkt.data_[0], &rcvPkt->data_[0],
+ rcvPkt->data_len_) );
+
+ EXPECT_EQ(sendPkt.remote_addr_, rcvPkt->remote_addr_);
+ EXPECT_EQ(rcvPkt->remote_port_, 10546);
+
+ delete rcvPkt;
+
+ delete ifacemgr;
+}
+
+}
diff --git a/src/bin/dhcp6/tests/pkt6_unittest.cc b/src/bin/dhcp6/tests/pkt6_unittest.cc
new file mode 100644
index 0000000..5054c45
--- /dev/null
+++ b/src/bin/dhcp6/tests/pkt6_unittest.cc
@@ -0,0 +1,44 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <config.h>
+#include <iostream>
+#include <sstream>
+
+#include <arpa/inet.h>
+#include <gtest/gtest.h>
+
+
+#include "dhcp6/pkt6.h"
+
+using namespace std;
+using namespace isc;
+
+namespace {
+// empty class for now, but may be extended once Addr6 becomes bigger
+class Pkt6Test : public ::testing::Test {
+public:
+ Pkt6Test() {
+ }
+};
+
+TEST_F(Pkt6Test, constructor) {
+ Pkt6 * pkt1 = new Pkt6(17);
+
+ ASSERT_EQ(pkt1->data_len_, 17);
+
+ delete pkt1;
+}
+
+}
diff --git a/src/bin/host/b10-host.1 b/src/bin/host/b10-host.1
index ed0068b..050f6a3 100644
--- a/src/bin/host/b10-host.1
+++ b/src/bin/host/b10-host.1
@@ -103,10 +103,6 @@ It doesn\'t use
at this time\&. The default name server used is 127\&.0\&.0\&.1\&.
.PP
-\fBb10\-host\fR
-does not do reverse lookups by default yet (by detecting if name is a IPv4 or IPv6 address)\&.
-.PP
-
\fB\-p\fR
is not a standard feature\&.
.SH "HISTORY"
diff --git a/src/bin/host/b10-host.xml b/src/bin/host/b10-host.xml
index 7da07dd..a17ef67 100644
--- a/src/bin/host/b10-host.xml
+++ b/src/bin/host/b10-host.xml
@@ -176,11 +176,6 @@
</para>
<para>
- <command>b10-host</command> does not do reverse lookups by
- default yet (by detecting if name is a IPv4 or IPv6 address).
- </para>
-
- <para>
<option>-p</option> is not a standard feature.
</para>
</refsect1>
diff --git a/src/bin/loadzone/Makefile.am b/src/bin/loadzone/Makefile.am
index 74d4dd4..a235d68 100644
--- a/src/bin/loadzone/Makefile.am
+++ b/src/bin/loadzone/Makefile.am
@@ -1,5 +1,6 @@
SUBDIRS = . tests/correct tests/error
bin_SCRIPTS = b10-loadzone
+noinst_SCRIPTS = run_loadzone.sh
CLEANFILES = b10-loadzone
diff --git a/src/bin/loadzone/run_loadzone.sh.in b/src/bin/loadzone/run_loadzone.sh.in
index 95de396..43b7920 100755
--- a/src/bin/loadzone/run_loadzone.sh.in
+++ b/src/bin/loadzone/run_loadzone.sh.in
@@ -18,14 +18,14 @@
PYTHON_EXEC=${PYTHON_EXEC:-@PYTHON@}
export PYTHON_EXEC
-PYTHONPATH=@abs_top_builddir@/src/lib/python
+PYTHONPATH=@abs_top_builddir@/src/lib/python/isc/log_messages:@abs_top_builddir@/src/lib/python
export PYTHONPATH
# If necessary (rare cases), explicitly specify paths to dynamic libraries
# required by loadable python modules.
SET_ENV_LIBRARY_PATH=@SET_ENV_LIBRARY_PATH@
if test $SET_ENV_LIBRARY_PATH = yes; then
- @ENV_LIBRARY_PATH@=@abs_top_builddir@/src/lib/dns/.libs:@abs_top_builddir@/src/lib/cryptolink/.libs:@abs_top_builddir@/src/lib/cc/.libs:@abs_top_builddir@/src/lib/config/.libs:@abs_top_builddir@/src/lib/log/.libs:@abs_top_builddir@/src/lib/util/.libs:@abs_top_builddir@/src/lib/util/io/.libs:@abs_top_builddir@/src/lib/exceptions/.libs:$@ENV_LIBRARY_PATH@
+ @ENV_LIBRARY_PATH@=@abs_top_builddir@/src/lib/dns/.libs:@abs_top_builddir@/src/lib/dns/python/.libs:@abs_top_builddir@/src/lib/cryptolink/.libs:@abs_top_builddir@/src/lib/cc/.libs:@abs_top_builddir@/src/lib/config/.libs:@abs_top_builddir@/src/lib/log/.libs:@abs_top_builddir@/src/lib/util/.libs:@abs_top_builddir@/src/lib/util/io/.libs:@abs_top_builddir@/src/lib/exceptions/.libs:@abs_top_builddir@/src/lib/datasrc/.libs:$@ENV_LIBRARY_PATH@
export @ENV_LIBRARY_PATH@
fi
diff --git a/src/bin/loadzone/tests/correct/Makefile.am b/src/bin/loadzone/tests/correct/Makefile.am
index 3507bfa..fb882ba 100644
--- a/src/bin/loadzone/tests/correct/Makefile.am
+++ b/src/bin/loadzone/tests/correct/Makefile.am
@@ -13,11 +13,13 @@ EXTRA_DIST += ttl2.db
EXTRA_DIST += ttlext.db
EXTRA_DIST += example.db
+noinst_SCRIPTS = correct_test.sh
+
# If necessary (rare cases), explicitly specify paths to dynamic libraries
# required by loadable python modules.
LIBRARY_PATH_PLACEHOLDER =
if SET_ENV_LIBRARY_PATH
-LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$$$(ENV_LIBRARY_PATH)
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/util/io/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
endif
# TODO: maybe use TESTS?
diff --git a/src/bin/loadzone/tests/correct/correct_test.sh.in b/src/bin/loadzone/tests/correct/correct_test.sh.in
old mode 100644
new mode 100755
index 509d8e5..d944451
--- a/src/bin/loadzone/tests/correct/correct_test.sh.in
+++ b/src/bin/loadzone/tests/correct/correct_test.sh.in
@@ -18,7 +18,7 @@
PYTHON_EXEC=${PYTHON_EXEC:-@PYTHON@}
export PYTHON_EXEC
-PYTHONPATH=@abs_top_srcdir@/src/lib/python:@abs_top_builddir@/src/lib/python
+PYTHONPATH=@abs_top_builddir@/src/lib/python/isc/log_messages:@abs_top_srcdir@/src/lib/python:@abs_top_builddir@/src/lib/python
export PYTHONPATH
LOADZONE_PATH=@abs_top_builddir@/src/bin/loadzone
diff --git a/src/bin/loadzone/tests/error/Makefile.am b/src/bin/loadzone/tests/error/Makefile.am
index 87bb1cf..03263b7 100644
--- a/src/bin/loadzone/tests/error/Makefile.am
+++ b/src/bin/loadzone/tests/error/Makefile.am
@@ -12,11 +12,13 @@ EXTRA_DIST += keyerror3.db
EXTRA_DIST += originerr1.db
EXTRA_DIST += originerr2.db
+noinst_SCRIPTS = error_test.sh
+
# If necessary (rare cases), explicitly specify paths to dynamic libraries
# required by loadable python modules.
LIBRARY_PATH_PLACEHOLDER =
if SET_ENV_LIBRARY_PATH
-LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$$$(ENV_LIBRARY_PATH)
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/util/io/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
endif
# TODO: use TESTS ?
diff --git a/src/bin/loadzone/tests/error/error_test.sh.in b/src/bin/loadzone/tests/error/error_test.sh.in
old mode 100644
new mode 100755
index d1d6bd1..94c5edb
--- a/src/bin/loadzone/tests/error/error_test.sh.in
+++ b/src/bin/loadzone/tests/error/error_test.sh.in
@@ -18,7 +18,7 @@
PYTHON_EXEC=${PYTHON_EXEC:-@PYTHON@}
export PYTHON_EXEC
-PYTHONPATH=@abs_top_srcdir@/src/lib/python:@abs_top_builddir@/src/lib/python
+PYTHONPATH=@abs_top_builddir@/src/lib/python/isc/log_messages:@abs_top_srcdir@/src/lib/python:@abs_top_builddir@/src/lib/python
export PYTHONPATH
LOADZONE_PATH=@abs_top_builddir@/src/bin/loadzone
diff --git a/src/bin/msgq/tests/Makefile.am b/src/bin/msgq/tests/Makefile.am
index 50c1e6e..50b218b 100644
--- a/src/bin/msgq/tests/Makefile.am
+++ b/src/bin/msgq/tests/Makefile.am
@@ -6,7 +6,7 @@ EXTRA_DIST = $(PYTESTS)
# required by loadable python modules.
LIBRARY_PATH_PLACEHOLDER =
if SET_ENV_LIBRARY_PATH
-LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$$$(ENV_LIBRARY_PATH)
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/util/io/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
endif
# test using command-line arguments, so use check-local target instead of TESTS
@@ -19,7 +19,7 @@ endif
for pytest in $(PYTESTS) ; do \
echo Running test: $$pytest ; \
$(LIBRARY_PATH_PLACEHOLDER) \
- env PYTHONPATH=$(abs_top_builddir)/src/bin/msgq:$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python \
+ PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_builddir)/src/bin/msgq \
BIND10_TEST_SOCKET_FILE=$(builddir)/test_msgq_socket.sock \
$(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
done
diff --git a/src/bin/resolver/b10-resolver.8 b/src/bin/resolver/b10-resolver.8
index 849092c..9161ec2 100644
--- a/src/bin/resolver/b10-resolver.8
+++ b/src/bin/resolver/b10-resolver.8
@@ -2,12 +2,12 @@
.\" Title: b10-resolver
.\" Author: [FIXME: author] [see http://docbook.sf.net/el/author]
.\" Generator: DocBook XSL Stylesheets v1.75.2 <http://docbook.sf.net/>
-.\" Date: February 17, 2011
+.\" Date: August 17, 2011
.\" Manual: BIND10
.\" Source: BIND10
.\" Language: English
.\"
-.TH "B10\-RESOLVER" "8" "February 17, 2011" "BIND10" "BIND10"
+.TH "B10\-RESOLVER" "8" "August 17, 2011" "BIND10" "BIND10"
.\" -----------------------------------------------------------------
.\" * set default formatting
.\" -----------------------------------------------------------------
@@ -54,7 +54,7 @@ must be either a valid numeric user ID or a valid user name\&. By default the da
.PP
\fB\-v\fR
.RS 4
-Enabled verbose mode\&. This enables diagnostic messages to STDERR\&.
+Enable verbose mode\&. This sets logging to the maximum debugging level\&.
.RE
.SH "CONFIGURATION AND COMMANDS"
.PP
@@ -77,6 +77,25 @@ string and
number\&. The defaults are address ::1 port 53 and address 127\&.0\&.0\&.1 port 53\&.
.PP
+
+
+
+
+
+\fIquery_acl\fR
+is a list of query access control rules\&. The list items are the
+\fIaction\fR
+string and the
+\fIfrom\fR
+or
+\fIkey\fR
+strings\&. The possible actions are ACCEPT, REJECT and DROP\&. The
+\fIfrom\fR
+is a remote (source) IPv4 or IPv6 address or special keyword\&. The
+\fIkey\fR
+is a TSIG key name\&. The default configuration accepts queries from 127\&.0\&.0\&.1 and ::1\&.
+.PP
+
\fIretries\fR
is the number of times to retry (resend query) after a query timeout (\fItimeout_query\fR)\&. The default is 3\&.
.PP
@@ -88,7 +107,7 @@ to use directly as root servers to start resolving\&. The list items are the
\fIaddress\fR
string and
\fIport\fR
-number\&. If empty, a hardcoded address for F\-root (192\&.5\&.5\&.241) is used\&.
+number\&. By default, a hardcoded address for l\&.root\-servers\&.net (199\&.7\&.83\&.42 or 2001:500:3::42) is used\&.
.PP
\fItimeout_client\fR
@@ -121,8 +140,7 @@ BIND 10 Guide\&.
.PP
The
\fBb10\-resolver\fR
-daemon was first coded in September 2010\&. The initial implementation only provided forwarding\&. Iteration was introduced in January 2011\&.
-
+daemon was first coded in September 2010\&. The initial implementation only provided forwarding\&. Iteration was introduced in January 2011\&. Caching was implemented in February 2011\&. Access control was introduced in June 2011\&.
.SH "COPYRIGHT"
.br
Copyright \(co 2010 Internet Systems Consortium, Inc. ("ISC")
diff --git a/src/bin/resolver/b10-resolver.xml b/src/bin/resolver/b10-resolver.xml
index bdf4f8a..75cced7 100644
--- a/src/bin/resolver/b10-resolver.xml
+++ b/src/bin/resolver/b10-resolver.xml
@@ -20,7 +20,7 @@
<refentry>
<refentryinfo>
- <date>February 17, 2011</date>
+ <date>August 17, 2011</date>
</refentryinfo>
<refmeta>
@@ -99,11 +99,14 @@
</listitem>
</varlistentry>
+<!-- TODO: this needs to be fixed as -v on command line
+should imply stdout or stderr output also -->
+<!-- TODO: can this -v be overridden by configuration or bindctl? -->
<varlistentry>
<term><option>-v</option></term>
<listitem><para>
- Enabled verbose mode. This enables diagnostic messages to
- STDERR.
+ Enable verbose mode.
+ This sets logging to the maximum debugging level.
</para></listitem>
</varlistentry>
@@ -147,6 +150,22 @@ once that is merged you can for instance do 'config add Resolver/forward_address
</para>
<para>
+<!-- TODO: need more explanation or point to guide. -->
+<!-- TODO: what about a netmask or cidr? -->
+<!-- TODO: document "key" -->
+<!-- TODO: where are the TSIG keys defined? -->
+<!-- TODO: key and from are mutually exclusive? what if both defined? -->
+ <varname>query_acl</varname> is a list of query access control
+ rules. The list items are the <varname>action</varname> string
+ and the <varname>from</varname> or <varname>key</varname> strings.
+ The possible actions are ACCEPT, REJECT and DROP.
+ The <varname>from</varname> is a remote (source) IPv4 or IPv6
+ address or special keyword.
+ The <varname>key</varname> is a TSIG key name.
+ The default configuration accepts queries from 127.0.0.1 and ::1.
+ </para>
+
+ <para>
<varname>retries</varname> is the number of times to retry
(resend query) after a query timeout
(<varname>timeout_query</varname>).
@@ -159,8 +178,10 @@ once that is merged you can for instance do 'config add Resolver/forward_address
root servers to start resolving.
The list items are the <varname>address</varname> string
and <varname>port</varname> number.
- If empty, a hardcoded address for F-root (192.5.5.241) is used.
+ By default, a hardcoded address for l.root-servers.net
+ (199.7.83.42 or 2001:500:3::42) is used.
</para>
+<!-- TODO: this is broken, see ticket #1184 -->
<para>
<varname>timeout_client</varname> is the number of milliseconds
@@ -234,7 +255,8 @@ once that is merged you can for instance do 'config add Resolver/forward_address
The <command>b10-resolver</command> daemon was first coded in
September 2010. The initial implementation only provided
forwarding. Iteration was introduced in January 2011.
-<!-- TODO: document when caching was added -->
+ Caching was implemented in February 2011.
+ Access control was introduced in June 2011.
<!-- TODO: document when validation was added -->
</para>
</refsect1>
diff --git a/src/bin/stats/Makefile.am b/src/bin/stats/Makefile.am
index e830f65..63e2a3b 100644
--- a/src/bin/stats/Makefile.am
+++ b/src/bin/stats/Makefile.am
@@ -5,18 +5,23 @@ pkglibexecdir = $(libexecdir)/@PACKAGE@
pkglibexec_SCRIPTS = b10-stats b10-stats-httpd
b10_statsdir = $(pkgdatadir)
-b10_stats_DATA = stats.spec stats-httpd.spec stats-schema.spec
+b10_stats_DATA = stats.spec stats-httpd.spec
b10_stats_DATA += stats-httpd-xml.tpl stats-httpd-xsd.tpl stats-httpd-xsl.tpl
-pyexec_DATA = stats_messages.py stats_httpd_messages.py
+
+nodist_pylogmessage_PYTHON = $(PYTHON_LOGMSGPKG_DIR)/work/stats_messages.py
+nodist_pylogmessage_PYTHON += $(PYTHON_LOGMSGPKG_DIR)/work/stats_httpd_messages.py
+pylogmessagedir = $(pyexecdir)/isc/log_messages/
CLEANFILES = b10-stats stats.pyc
CLEANFILES += b10-stats-httpd stats_httpd.pyc
-CLEANFILES += stats_messages.py stats_messages.pyc
-CLEANFILES += stats_httpd_messages.py stats_httpd_messages.pyc
+CLEANFILES += $(PYTHON_LOGMSGPKG_DIR)/work/stats_messages.py
+CLEANFILES += $(PYTHON_LOGMSGPKG_DIR)/work/stats_messages.pyc
+CLEANFILES += $(PYTHON_LOGMSGPKG_DIR)/work/stats_httpd_messages.py
+CLEANFILES += $(PYTHON_LOGMSGPKG_DIR)/work/stats_httpd_messages.pyc
man_MANS = b10-stats.8 b10-stats-httpd.8
EXTRA_DIST = $(man_MANS) b10-stats.xml b10-stats-httpd.xml
-EXTRA_DIST += stats.spec stats-httpd.spec stats-schema.spec
+EXTRA_DIST += stats.spec stats-httpd.spec
EXTRA_DIST += stats-httpd-xml.tpl stats-httpd-xsd.tpl stats-httpd-xsl.tpl
EXTRA_DIST += stats_messages.mes stats_httpd_messages.mes
@@ -30,18 +35,20 @@ b10-stats-httpd.8: b10-stats-httpd.xml
endif
-stats_messages.py: stats_messages.mes
- $(top_builddir)/src/lib/log/compiler/message -p $(top_srcdir)/src/bin/stats/stats_messages.mes
+$(PYTHON_LOGMSGPKG_DIR)/work/stats_messages.py : stats_messages.mes
+ $(top_builddir)/src/lib/log/compiler/message \
+ -d $(PYTHON_LOGMSGPKG_DIR)/work -p $(srcdir)/stats_messages.mes
-stats_httpd_messages.py: stats_httpd_messages.mes
- $(top_builddir)/src/lib/log/compiler/message -p $(top_srcdir)/src/bin/stats/stats_httpd_messages.mes
+$(PYTHON_LOGMSGPKG_DIR)/work/stats_httpd_messages.py : stats_httpd_messages.mes
+ $(top_builddir)/src/lib/log/compiler/message \
+ -d $(PYTHON_LOGMSGPKG_DIR)/work -p $(srcdir)/stats_httpd_messages.mes
# this is done here since configure.ac AC_OUTPUT doesn't expand exec_prefix
-b10-stats: stats.py
+b10-stats: stats.py $(PYTHON_LOGMSGPKG_DIR)/work/stats_messages.py
$(SED) -e "s|@@PYTHONPATH@@|@pyexecdir@|" stats.py >$@
chmod a+x $@
-b10-stats-httpd: stats_httpd.py
+b10-stats-httpd: stats_httpd.py $(PYTHON_LOGMSGPKG_DIR)/work/stats_httpd_messages.py
$(SED) -e "s|@@PYTHONPATH@@|@pyexecdir@|" stats_httpd.py >$@
chmod a+x $@
diff --git a/src/bin/stats/b10-stats-httpd.8 b/src/bin/stats/b10-stats-httpd.8
index ed4aafa..1206e1d 100644
--- a/src/bin/stats/b10-stats-httpd.8
+++ b/src/bin/stats/b10-stats-httpd.8
@@ -36,7 +36,7 @@ b10-stats-httpd \- BIND 10 HTTP server for HTTP/XML interface of statistics
.PP
\fBb10\-stats\-httpd\fR
-is a standalone HTTP server\&. It is intended for HTTP/XML interface for statistics module\&. This server process runs as a process separated from the process of the BIND 10 Stats daemon (\fBb10\-stats\fR)\&. The server is initially executed by the BIND 10 boss process (\fBbind10\fR) and eventually exited by it\&. The server is intended to be server requests by HTTP clients like web browsers and third\-party modules\&. When the server is asked, it requests BIND 10 statistics data from
+is a standalone HTTP server\&. It is intended for HTTP/XML interface for statistics module\&. This server process runs as a process separated from the process of the BIND 10 Stats daemon (\fBb10\-stats\fR)\&. The server is initially executed by the BIND 10 boss process (\fBbind10\fR) and eventually exited by it\&. The server is intended to serve requests by HTTP clients like web browsers and third\-party modules\&. When the server is asked, it requests BIND 10 statistics data or its schema from
\fBb10\-stats\fR, and it sends the data back in Python dictionary format and the server converts it into XML format\&. The server sends it to the HTTP client\&. The server can send three types of document, which are XML (Extensible Markup Language), XSD (XML Schema definition) and XSL (Extensible Stylesheet Language)\&. The XML document is the statistics data of BIND 10, The XSD document is the data schema of it, and The XSL document is the style sheet to be showed for the web browsers\&. There is different URL for each document\&. But please note that you would be redirected to the URL of XML document if you request the URL of the root document\&. For example, you would be redirected to http://127\&.0\&.0\&.1:8000/bind10/statistics/xml if you request http://127\&.0\&.0\&.1:8000/\&. Please see the manual and the spec file of
\fBb10\-stats\fR
for more details about the items of BIND 10 statistics\&. The server uses CC session in communication with
@@ -66,10 +66,6 @@ bindctl(1)\&. Please see the manual of
bindctl(1)
about how to configure the settings\&.
.PP
-/usr/local/share/bind10\-devel/stats\-schema\&.spec
-\(em This is a spec file for data schema of of BIND 10 statistics\&. This schema cannot be configured via
-bindctl(1)\&.
-.PP
/usr/local/share/bind10\-devel/stats\-httpd\-xml\&.tpl
\(em the template file of XML document\&.
diff --git a/src/bin/stats/b10-stats-httpd.xml b/src/bin/stats/b10-stats-httpd.xml
index 34c704f..c8df9b8 100644
--- a/src/bin/stats/b10-stats-httpd.xml
+++ b/src/bin/stats/b10-stats-httpd.xml
@@ -57,7 +57,7 @@
by the BIND 10 boss process (<command>bind10</command>) and eventually
exited by it. The server is intended to be server requests by HTTP
clients like web browsers and third-party modules. When the server is
- asked, it requests BIND 10 statistics data from
+ asked, it requests BIND 10 statistics data or its schema from
<command>b10-stats</command>, and it sends the data back in Python
dictionary format and the server converts it into XML format. The server
sends it to the HTTP client. The server can send three types of document,
@@ -112,12 +112,6 @@
of <refentrytitle>bindctl</refentrytitle><manvolnum>1</manvolnum> about
how to configure the settings.
</para>
- <para><filename>/usr/local/share/bind10-devel/stats-schema.spec</filename>
- <!--TODO: The filename should be computed from prefix-->
- — This is a spec file for data schema of
- of BIND 10 statistics. This schema cannot be configured
- via <refentrytitle>bindctl</refentrytitle><manvolnum>1</manvolnum>.
- </para>
<para>
<filename>/usr/local/share/bind10-devel/stats-httpd-xml.tpl</filename>
<!--TODO: The filename should be computed from prefix-->
@@ -138,7 +132,7 @@
<refsect1>
<title>CONFIGURATION AND COMMANDS</title>
<para>
- The configurable setting in
+ The configurable setting in
<filename>stats-httpd.spec</filename> is:
</para>
<variablelist>
diff --git a/src/bin/stats/b10-stats.8 b/src/bin/stats/b10-stats.8
index f69e4d3..0204ca1 100644
--- a/src/bin/stats/b10-stats.8
+++ b/src/bin/stats/b10-stats.8
@@ -1,22 +1,13 @@
'\" t
.\" Title: b10-stats
.\" Author: [FIXME: author] [see http://docbook.sf.net/el/author]
-.\" Generator: DocBook XSL Stylesheets v1.76.1 <http://docbook.sf.net/>
-.\" Date: Oct 15, 2010
+.\" Generator: DocBook XSL Stylesheets v1.75.2 <http://docbook.sf.net/>
+.\" Date: August 11, 2011
.\" Manual: BIND10
.\" Source: BIND10
.\" Language: English
.\"
-.TH "B10\-STATS" "8" "Oct 15, 2010" "BIND10" "BIND10"
-.\" -----------------------------------------------------------------
-.\" * Define some portability stuff
-.\" -----------------------------------------------------------------
-.\" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-.\" http://bugs.debian.org/507673
-.\" http://lists.gnu.org/archive/html/groff/2009-02/msg00013.html
-.\" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-.ie \n(.g .ds Aq \(aq
-.el .ds Aq '
+.TH "B10\-STATS" "8" "August 11, 2011" "BIND10" "BIND10"
.\" -----------------------------------------------------------------
.\" * set default formatting
.\" -----------------------------------------------------------------
@@ -45,9 +36,9 @@ with other modules like
\fBb10\-auth\fR
and so on\&. It waits for coming data from other modules, then other modules send data to stats module periodically\&. Other modules send stats data to stats module independently from implementation of stats module, so the frequency of sending data may not be constant\&. Stats module collects data and aggregates it\&.
\fBb10\-stats\fR
-invokes "sendstats" command for
+invokes an internal command for
\fBbind10\fR
-after its initial starting because it\*(Aqs sure to collect statistics data from
+after its initial starting because it\'s sure to collect statistics data from
\fBbind10\fR\&.
.SH "OPTIONS"
.PP
@@ -59,6 +50,84 @@ This
\fBb10\-stats\fR
switches to verbose mode\&. It sends verbose messages to STDOUT\&.
.RE
+.SH "CONFIGURATION AND COMMANDS"
+.PP
+The
+\fBb10\-stats\fR
+command does not have any configurable settings\&.
+.PP
+The configuration commands are:
+.PP
+
+
+\fBremove\fR
+removes the named statistics name and data\&.
+.PP
+
+
+\fBreset\fR
+will reset all statistics data to default values except for constant names\&. This may re\-add previously removed statistics names\&.
+.PP
+
+\fBset\fR
+.PP
+
+\fBshow\fR
+will send the statistics data in JSON format\&. By default, it outputs all the statistics data it has collected\&. An optional item name may be specified to receive individual output\&.
+.PP
+
+\fBshutdown\fR
+will shutdown the
+\fBb10\-stats\fR
+process\&. (Note that the
+\fBbind10\fR
+parent may restart it\&.)
+.PP
+
+\fBstatus\fR
+simply indicates that the daemon is running\&.
+.SH "STATISTICS DATA"
+.PP
+The
+\fBb10\-stats\fR
+daemon contains these statistics:
+.PP
+report_time
+.RS 4
+The latest report date and time in ISO 8601 format\&.
+.RE
+.PP
+stats\&.boot_time
+.RS 4
+The date and time when this daemon was started in ISO 8601 format\&. This is a constant which can\'t be reset except by restarting
+\fBb10\-stats\fR\&.
+.RE
+.PP
+stats\&.last_update_time
+.RS 4
+The date and time (in ISO 8601 format) when this daemon last received data from another component\&.
+.RE
+.PP
+stats\&.lname
+.RS 4
+This is the name used for the
+\fBb10\-msgq\fR
+command\-control channel\&. (This is a constant which can\'t be reset except by restarting
+\fBb10\-stats\fR\&.)
+.RE
+.PP
+stats\&.start_time
+.RS 4
+This is the date and time (in ISO 8601 format) when this daemon started collecting data\&.
+.RE
+.PP
+stats\&.timestamp
+.RS 4
The current date and time represented in seconds since UNIX epoch (1970\-01\-01T00:00:00Z) with precision (delimited with a period) up to one hundred thousandth of a second\&.
+.RE
+.PP
+See other manual pages for explanations of the statistics that are tracked by
+\fBb10\-stats\fR\&.
.SH "FILES"
.PP
/usr/local/share/bind10\-devel/stats\&.spec
@@ -66,10 +135,6 @@ switches to verbose mode\&. It sends verbose messages to STDOUT\&.
\fBb10\-stats\fR\&. It contains commands for
\fBb10\-stats\fR\&. They can be invoked via
bindctl(1)\&.
-.PP
-/usr/local/share/bind10\-devel/stats\-schema\&.spec
-\(em This is a spec file for data schema of of BIND 10 statistics\&. This schema cannot be configured via
-bindctl(1)\&.
.SH "SEE ALSO"
.PP
@@ -82,7 +147,7 @@ BIND 10 Guide\&.
.PP
The
\fBb10\-stats\fR
-daemon was initially designed and implemented by Naoki Kambe of JPRS in Oct 2010\&.
+daemon was initially designed and implemented by Naoki Kambe of JPRS in October 2010\&.
.SH "COPYRIGHT"
.br
Copyright \(co 2010 Internet Systems Consortium, Inc. ("ISC")
diff --git a/src/bin/stats/b10-stats.xml b/src/bin/stats/b10-stats.xml
index f0c472d..13ada7a 100644
--- a/src/bin/stats/b10-stats.xml
+++ b/src/bin/stats/b10-stats.xml
@@ -20,7 +20,7 @@
<refentry>
<refentryinfo>
- <date>Oct 15, 2010</date>
+ <date>August 11, 2011</date>
</refentryinfo>
<refmeta>
@@ -64,9 +64,10 @@
send stats data to stats module independently from
implementation of stats module, so the frequency of sending data
may not be constant. Stats module collects data and aggregates
- it. <command>b10-stats</command> invokes "sendstats" command
+ it. <command>b10-stats</command> invokes an internal command
for <command>bind10</command> after its initial starting because it's
sure to collect statistics data from <command>bind10</command>.
+<!-- TODO: reword that last sentence? -->
</para>
</refsect1>
@@ -87,6 +88,123 @@
</refsect1>
<refsect1>
+ <title>CONFIGURATION AND COMMANDS</title>
+
+ <para>
+ The <command>b10-stats</command> command does not have any
+ configurable settings.
+ </para>
+
+<!-- TODO: formating -->
+ <para>
+ The configuration commands are:
+ </para>
+
+ <para>
+<!-- TODO: remove is removed in trac930 -->
+ <command>remove</command> removes the named statistics name and data.
+ </para>
+
+ <para>
+<!-- TODO: reset is removed in trac930 -->
+ <command>reset</command> will reset all statistics data to
+ default values except for constant names.
+ This may re-add previously removed statistics names.
+ </para>
+
+ <para>
+ <command>set</command>
+<!-- TODO: document this -->
+ </para>
+
+ <para>
+ <command>show</command> will send the statistics data
+ in JSON format.
+ By default, it outputs all the statistics data it has collected.
+ An optional item name may be specified to receive individual output.
+ </para>
+
+<!-- TODO: document showschema -->
+
+ <para>
+ <command>shutdown</command> will shutdown the
+ <command>b10-stats</command> process.
+ (Note that the <command>bind10</command> parent may restart it.)
+ </para>
+
+ <para>
+ <command>status</command> simply indicates that the daemon is
+ running.
+ </para>
+
+ </refsect1>
+
+ <refsect1>
+ <title>STATISTICS DATA</title>
+
+ <para>
+ The <command>b10-stats</command> daemon contains these statistics:
+ </para>
+
+ <variablelist>
+
+ <varlistentry>
+ <term>report_time</term>
+<!-- TODO: why not named stats.report_time? -->
+ <listitem><simpara>The latest report date and time in
+ ISO 8601 format.</simpara></listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term>stats.boot_time</term>
+ <listitem><simpara>The date and time when this daemon was
+ started in ISO 8601 format.
+ This is a constant which can't be reset except by restarting
+ <command>b10-stats</command>.
+ </simpara></listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term>stats.last_update_time</term>
+ <listitem><simpara>The date and time (in ISO 8601 format)
+ when this daemon last received data from another component.
+ </simpara></listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term>stats.lname</term>
+ <listitem><simpara>This is the name used for the
+ <command>b10-msgq</command> command-control channel.
+ (This is a constant which can't be reset except by restarting
+ <command>b10-stats</command>.)
+ </simpara></listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term>stats.start_time</term>
+ <listitem><simpara>This is the date and time (in ISO 8601 format)
+ when this daemon started collecting data.
+ </simpara></listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term>stats.timestamp</term>
+ <listitem><simpara>The current date and time represented in
+      seconds since UNIX epoch (1970-01-01T00:00:00Z) with
+      precision (delimited with a period) up to
+      one hundred thousandth of a second.</simpara></listitem>
+ </varlistentry>
+
+ </variablelist>
+
+ <para>
+      See other manual pages for explanations of the statistics
+      that are tracked by <command>b10-stats</command>.
+ </para>
+
+ </refsect1>
+
+ <refsect1>
<title>FILES</title>
<para><filename>/usr/local/share/bind10-devel/stats.spec</filename>
<!--TODO: The filename should be computed from prefix-->
@@ -95,12 +213,6 @@
invoked
via <refentrytitle>bindctl</refentrytitle><manvolnum>1</manvolnum>.
</para>
- <para><filename>/usr/local/share/bind10-devel/stats-schema.spec</filename>
- <!--TODO: The filename should be computed from prefix-->
- — This is a spec file for data schema of
- of BIND 10 statistics. This schema cannot be configured
- via <refentrytitle>bindctl</refentrytitle><manvolnum>1</manvolnum>.
- </para>
</refsect1>
<refsect1>
@@ -126,7 +238,7 @@
<title>HISTORY</title>
<para>
The <command>b10-stats</command> daemon was initially designed
- and implemented by Naoki Kambe of JPRS in Oct 2010.
+ and implemented by Naoki Kambe of JPRS in October 2010.
</para>
</refsect1>
</refentry><!--
diff --git a/src/bin/stats/stats-httpd-xsl.tpl b/src/bin/stats/stats-httpd-xsl.tpl
index 01ffdc6..a1f6406 100644
--- a/src/bin/stats/stats-httpd-xsl.tpl
+++ b/src/bin/stats/stats-httpd-xsl.tpl
@@ -44,6 +44,7 @@ td.title {
<h1>BIND 10 Statistics</h1>
<table>
<tr>
+ <th>Owner</th>
<th>Title</th>
<th>Value</th>
</tr>
diff --git a/src/bin/stats/stats-schema.spec b/src/bin/stats/stats-schema.spec
deleted file mode 100644
index 5252865..0000000
--- a/src/bin/stats/stats-schema.spec
+++ /dev/null
@@ -1,86 +0,0 @@
-{
- "module_spec": {
- "module_name": "Stats",
- "module_description": "Statistics data schema",
- "config_data": [
- {
- "item_name": "report_time",
- "item_type": "string",
- "item_optional": false,
- "item_default": "1970-01-01T00:00:00Z",
- "item_title": "Report time",
- "item_description": "A date time when stats module reports",
- "item_format": "date-time"
- },
- {
- "item_name": "bind10.boot_time",
- "item_type": "string",
- "item_optional": false,
- "item_default": "1970-01-01T00:00:00Z",
- "item_title": "bind10.BootTime",
- "item_description": "A date time when bind10 process starts initially",
- "item_format": "date-time"
- },
- {
- "item_name": "stats.boot_time",
- "item_type": "string",
- "item_optional": false,
- "item_default": "1970-01-01T00:00:00Z",
- "item_title": "stats.BootTime",
- "item_description": "A date time when the stats module starts initially or when the stats module restarts",
- "item_format": "date-time"
- },
- {
- "item_name": "stats.start_time",
- "item_type": "string",
- "item_optional": false,
- "item_default": "1970-01-01T00:00:00Z",
- "item_title": "stats.StartTime",
- "item_description": "A date time when the stats module starts collecting data or resetting values last time",
- "item_format": "date-time"
- },
- {
- "item_name": "stats.last_update_time",
- "item_type": "string",
- "item_optional": false,
- "item_default": "1970-01-01T00:00:00Z",
- "item_title": "stats.LastUpdateTime",
- "item_description": "The latest date time when the stats module receives from other modules like auth server or boss process and so on",
- "item_format": "date-time"
- },
- {
- "item_name": "stats.timestamp",
- "item_type": "real",
- "item_optional": false,
- "item_default": 0.0,
- "item_title": "stats.Timestamp",
- "item_description": "A current time stamp since epoch time (1970-01-01T00:00:00Z)"
- },
- {
- "item_name": "stats.lname",
- "item_type": "string",
- "item_optional": false,
- "item_default": "",
- "item_title": "stats.LocalName",
- "item_description": "A localname of stats module given via CC protocol"
- },
- {
- "item_name": "auth.queries.tcp",
- "item_type": "integer",
- "item_optional": false,
- "item_default": 0,
- "item_title": "auth.queries.tcp",
- "item_description": "A number of total query counts which all auth servers receive over TCP since they started initially"
- },
- {
- "item_name": "auth.queries.udp",
- "item_type": "integer",
- "item_optional": false,
- "item_default": 0,
- "item_title": "auth.queries.udp",
- "item_description": "A number of total query counts which all auth servers receive over UDP since they started initially"
- }
- ],
- "commands": []
- }
-}
diff --git a/src/bin/stats/stats.py.in b/src/bin/stats/stats.py.in
old mode 100644
new mode 100755
index ce3d9f4..da00818
--- a/src/bin/stats/stats.py.in
+++ b/src/bin/stats/stats.py.in
@@ -15,18 +15,19 @@
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+"""
+Statistics daemon in BIND 10
+
+"""
import sys; sys.path.append ('@@PYTHONPATH@@')
import os
-import signal
-import select
from time import time, strftime, gmtime
from optparse import OptionParser, OptionValueError
-from collections import defaultdict
-from isc.config.ccsession import ModuleCCSession, create_answer
-from isc.cc import Session, SessionError
+import isc
+import isc.util.process
import isc.log
-from stats_messages import *
+from isc.log_messages.stats_messages import *
isc.log.init("b10-stats")
logger = isc.log.Logger("stats")
@@ -35,211 +36,157 @@ logger = isc.log.Logger("stats")
# have #1074
DBG_STATS_MESSAGING = 30
+# This is for boot_time of Stats
+_BASETIME = gmtime()
+
# for setproctitle
-import isc.util.process
isc.util.process.rename()
# If B10_FROM_SOURCE is set in the environment, we use data files
# from a directory relative to that, otherwise we use the ones
# installed on the system
if "B10_FROM_SOURCE" in os.environ:
- BASE_LOCATION = os.environ["B10_FROM_SOURCE"] + os.sep + \
- "src" + os.sep + "bin" + os.sep + "stats"
+ SPECFILE_LOCATION = os.environ["B10_FROM_SOURCE"] + os.sep + \
+ "src" + os.sep + "bin" + os.sep + "stats" + os.sep + "stats.spec"
else:
PREFIX = "@prefix@"
DATAROOTDIR = "@datarootdir@"
- BASE_LOCATION = "@datadir@" + os.sep + "@PACKAGE@"
- BASE_LOCATION = BASE_LOCATION.replace("${datarootdir}", DATAROOTDIR).replace("${prefix}", PREFIX)
-SPECFILE_LOCATION = BASE_LOCATION + os.sep + "stats.spec"
-SCHEMA_SPECFILE_LOCATION = BASE_LOCATION + os.sep + "stats-schema.spec"
+ SPECFILE_LOCATION = "@datadir@" + os.sep + "@PACKAGE@" + os.sep + "stats.spec"
+ SPECFILE_LOCATION = SPECFILE_LOCATION.replace("${datarootdir}", DATAROOTDIR)\
+ .replace("${prefix}", PREFIX)
-class Singleton(type):
+def get_timestamp():
"""
- A abstract class of singleton pattern
+ get current timestamp
"""
- # Because of singleton pattern:
- # At the beginning of coding, one UNIX domain socket is needed
- # for config manager, another socket is needed for stats module,
- # then stats module might need two sockets. So I adopted the
- # singleton pattern because I avoid creating multiple sockets in
- # one stats module. But in the initial version stats module
- # reports only via bindctl, so just one socket is needed. To use
- # the singleton pattern is not important now. :(
+ return time()
- def __init__(self, *args, **kwargs):
- type.__init__(self, *args, **kwargs)
- self._instances = {}
+def get_datetime(gmt=None):
+ """
+ get current datetime
+ """
+ if not gmt: gmt = gmtime()
+ return strftime("%Y-%m-%dT%H:%M:%SZ", gmt)
- def __call__(self, *args, **kwargs):
- if args not in self._instances:
- self._instances[args]={}
- kw = tuple(kwargs.items())
- if kw not in self._instances[args]:
- self._instances[args][kw] = type.__call__(self, *args, **kwargs)
- return self._instances[args][kw]
+def get_spec_defaults(spec):
+ """
+ extracts the default values of the items from spec specified in
+ arg, and returns the dict-type variable which is a set of the item
+ names and the default values
+ """
+ if type(spec) is not list: return {}
+ def _get_spec_defaults(spec):
+ item_type = spec['item_type']
+ if item_type == "integer":
+ return int(spec.get('item_default', 0))
+ elif item_type == "real":
+ return float(spec.get('item_default', 0.0))
+ elif item_type == "boolean":
+ return bool(spec.get('item_default', False))
+ elif item_type == "string":
+ return str(spec.get('item_default', ""))
+ elif item_type == "list":
+ return spec.get(
+ "item_default",
+ [ _get_spec_defaults(spec["list_item_spec"]) ])
+ elif item_type == "map":
+ return spec.get(
+ "item_default",
+ dict([ (s["item_name"], _get_spec_defaults(s)) for s in spec["map_item_spec"] ]) )
+ else:
+ return spec.get("item_default", None)
+ return dict([ (s['item_name'], _get_spec_defaults(s)) for s in spec ])
class Callback():
"""
A Callback handler class
"""
- def __init__(self, name=None, callback=None, args=(), kwargs={}):
- self.name = name
- self.callback = callback
+ def __init__(self, command=None, args=(), kwargs={}):
+ self.command = command
self.args = args
self.kwargs = kwargs
def __call__(self, *args, **kwargs):
- if not args:
- args = self.args
- if not kwargs:
- kwargs = self.kwargs
- if self.callback:
- return self.callback(*args, **kwargs)
-
-class Subject():
- """
- A abstract subject class of observer pattern
- """
- # Because of observer pattern:
- # In the initial release, I'm also sure that observer pattern
- # isn't definitely needed because the interface between gathering
- # and reporting statistics data is single. However in the future
- # release, the interfaces may be multiple, that is, multiple
- # listeners may be needed. For example, one interface, which
- # stats module has, is for between ''config manager'' and stats
- # module, another interface is for between ''HTTP server'' and
- # stats module, and one more interface is for between ''SNMP
- # server'' and stats module. So by considering that stats module
- # needs multiple interfaces in the future release, I adopted the
- # observer pattern in stats module. But I don't have concrete
- # ideas in case of multiple listener currently.
-
- def __init__(self):
- self._listeners = []
-
- def attach(self, listener):
- if not listener in self._listeners:
- self._listeners.append(listener)
-
- def detach(self, listener):
- try:
- self._listeners.remove(listener)
- except ValueError:
- pass
+ if not args: args = self.args
+ if not kwargs: kwargs = self.kwargs
+ if self.command: return self.command(*args, **kwargs)
- def notify(self, event, modifier=None):
- for listener in self._listeners:
- if modifier != listener:
- listener.update(event)
+class StatsError(Exception):
+ """Exception class for Stats class"""
+ pass
-class Listener():
+class Stats:
"""
- A abstract listener class of observer pattern
+ Main class of stats module
"""
- def __init__(self, subject):
- self.subject = subject
- self.subject.attach(self)
- self.events = {}
-
- def update(self, name):
- if name in self.events:
- callback = self.events[name]
- return callback()
-
- def add_event(self, event):
- self.events[event.name]=event
-
-class SessionSubject(Subject, metaclass=Singleton):
- """
- A concrete subject class which creates CC session object
- """
- def __init__(self, session=None):
- Subject.__init__(self)
- self.session=session
- self.running = False
-
- def start(self):
- self.running = True
- self.notify('start')
-
- def stop(self):
+ def __init__(self):
self.running = False
- self.notify('stop')
-
- def check(self):
- self.notify('check')
-
-class CCSessionListener(Listener):
- """
- A concrete listener class which creates SessionSubject object and
- ModuleCCSession object
- """
- def __init__(self, subject):
- Listener.__init__(self, subject)
- self.session = subject.session
- self.boot_time = get_datetime()
-
# create ModuleCCSession object
- self.cc_session = ModuleCCSession(SPECFILE_LOCATION,
- self.config_handler,
- self.command_handler,
- self.session)
-
- self.session = self.subject.session = self.cc_session._session
-
- # initialize internal data
- self.stats_spec = isc.config.module_spec_from_file(SCHEMA_SPECFILE_LOCATION).get_config_spec()
- self.stats_data = self.initialize_data(self.stats_spec)
-
- # add event handler invoked via SessionSubject object
- self.add_event(Callback('start', self.start))
- self.add_event(Callback('stop', self.stop))
- self.add_event(Callback('check', self.check))
- # don't add 'command_' suffix to the special commands in
- # order to prevent executing internal command via bindctl
-
+ self.mccs = isc.config.ModuleCCSession(SPECFILE_LOCATION,
+ self.config_handler,
+ self.command_handler)
+ self.cc_session = self.mccs._session
+ # get module spec
+ self.module_name = self.mccs.get_module_spec().get_module_name()
+ self.modules = {}
+ self.statistics_data = {}
# get commands spec
- self.commands_spec = self.cc_session.get_module_spec().get_commands_spec()
-
+ self.commands_spec = self.mccs.get_module_spec().get_commands_spec()
# add event handler related command_handler of ModuleCCSession
- # invoked via bindctl
+ self.callbacks = {}
for cmd in self.commands_spec:
+ # add prefix "command_"
+ name = "command_" + cmd["command_name"]
try:
- # add prefix "command_"
- name = "command_" + cmd["command_name"]
callback = getattr(self, name)
- kwargs = self.initialize_data(cmd["command_args"])
- self.add_event(Callback(name=name, callback=callback, args=(), kwargs=kwargs))
- except AttributeError as ae:
- logger.error(STATS_UNKNOWN_COMMAND_IN_SPEC, cmd["command_name"])
+ kwargs = get_spec_defaults(cmd["command_args"])
+ self.callbacks[name] = Callback(command=callback, kwargs=kwargs)
+ except AttributeError:
+ raise StatsError(STATS_UNKNOWN_COMMAND_IN_SPEC, cmd["command_name"])
+ self.mccs.start()
def start(self):
"""
- start the cc chanel
+ Start stats module
"""
- # set initial value
- self.stats_data['stats.boot_time'] = self.boot_time
- self.stats_data['stats.start_time'] = get_datetime()
- self.stats_data['stats.last_update_time'] = get_datetime()
- self.stats_data['stats.lname'] = self.session.lname
- self.cc_session.start()
+ self.running = True
+ logger.info(STATS_STARTING)
+
# request Bob to send statistics data
logger.debug(DBG_STATS_MESSAGING, STATS_SEND_REQUEST_BOSS)
- cmd = isc.config.ccsession.create_command("sendstats", None)
- seq = self.session.group_sendmsg(cmd, 'Boss')
- self.session.group_recvmsg(True, seq)
+ cmd = isc.config.ccsession.create_command("getstats", None)
+ seq = self.cc_session.group_sendmsg(cmd, 'Boss')
+ try:
+ answer, env = self.cc_session.group_recvmsg(False, seq)
+ if answer:
+ rcode, args = isc.config.ccsession.parse_answer(answer)
+ if rcode == 0:
+ errors = self.update_statistics_data(
+ args["owner"], **args["data"])
+ if errors:
+ raise StatsError("boss spec file is incorrect: "
+ + ", ".join(errors))
+ errors = self.update_statistics_data(
+ self.module_name,
+ last_update_time=get_datetime())
+ if errors:
+ raise StatsError("stats spec file is incorrect: "
+ + ", ".join(errors))
+ except isc.cc.session.SessionTimeout:
+ pass
- def stop(self):
- """
- stop the cc chanel
- """
- return self.cc_session.close()
+        # initialize Statistics data
+ errors = self.update_statistics_data(
+ self.module_name,
+ lname=self.cc_session.lname,
+ boot_time=get_datetime(_BASETIME)
+ )
+ if errors:
+ raise StatsError("stats spec file is incorrect: "
+ + ", ".join(errors))
- def check(self):
- """
- check the cc chanel
- """
- return self.cc_session.check_command(False)
+ while self.running:
+ self.mccs.check_command(False)
def config_handler(self, new_config):
"""
@@ -247,174 +194,222 @@ class CCSessionListener(Listener):
"""
logger.debug(DBG_STATS_MESSAGING, STATS_RECEIVED_NEW_CONFIG,
new_config)
-
# do nothing currently
- return create_answer(0)
+ return isc.config.create_answer(0)
- def command_handler(self, command, *args, **kwargs):
+ def command_handler(self, command, kwargs):
"""
handle commands from the cc channel
"""
- # add 'command_' suffix in order to executing command via bindctl
name = 'command_' + command
-
- if name in self.events:
- event = self.events[name]
- return event(*args, **kwargs)
+ if name in self.callbacks:
+ callback = self.callbacks[name]
+ if kwargs:
+ return callback(**kwargs)
+ else:
+ return callback()
else:
- return self.command_unknown(command, args)
+ logger.error(STATS_RECEIVED_UNKNOWN_COMMAND, command)
+ return isc.config.create_answer(1, "Unknown command: '"+str(command)+"'")
- def command_shutdown(self, args):
+ def update_modules(self):
"""
- handle shutdown command
+ updates information of each module. This method gets each
+ module's information from the config manager and sets it into
+ self.modules. If its getting from the config manager fails, it
+ raises StatsError.
"""
- logger.info(STATS_RECEIVED_SHUTDOWN_COMMAND)
- self.subject.running = False
- return create_answer(0)
+ modules = {}
+ seq = self.cc_session.group_sendmsg(
+ isc.config.ccsession.create_command(
+ isc.config.ccsession.COMMAND_GET_STATISTICS_SPEC),
+ 'ConfigManager')
+ (answer, env) = self.cc_session.group_recvmsg(False, seq)
+ if answer:
+ (rcode, value) = isc.config.ccsession.parse_answer(answer)
+ if rcode == 0:
+ for mod in value:
+ spec = { "module_name" : mod }
+ if value[mod] and type(value[mod]) is list:
+ spec["statistics"] = value[mod]
+ modules[mod] = isc.config.module_spec.ModuleSpec(spec)
+ else:
+ raise StatsError("Updating module spec fails: " + str(value))
+ modules[self.module_name] = self.mccs.get_module_spec()
+ self.modules = modules
- def command_set(self, args, stats_data={}):
+ def get_statistics_data(self, owner=None, name=None):
"""
- handle set command
+ returns statistics data which stats module has of each
+ module. If it can't find specified statistics data, it raises
+ StatsError.
"""
- # 'args' must be dictionary type
- self.stats_data.update(args['stats_data'])
-
- # overwrite "stats.LastUpdateTime"
- self.stats_data['stats.last_update_time'] = get_datetime()
-
- return create_answer(0)
+ self.update_statistics_data()
+ if owner and name:
+ try:
+ return self.statistics_data[owner][name]
+ except KeyError:
+ pass
+ elif owner:
+ try:
+ return self.statistics_data[owner]
+ except KeyError:
+ pass
+ elif name:
+ pass
+ else:
+ return self.statistics_data
+ raise StatsError("No statistics data found: "
+ + "owner: " + str(owner) + ", "
+ + "name: " + str(name))
- def command_remove(self, args, stats_item_name=''):
+ def update_statistics_data(self, owner=None, **data):
"""
- handle remove command
+ change statistics date of specified module into specified
+ data. It updates information of each module first, and it
+ updates statistics data. If specified data is invalid for
+ statistics spec of specified owner, it returns a list of error
+ messeges. If there is no error or if neither owner nor data is
+ specified in args, it returns None.
"""
-
- # 'args' must be dictionary type
- if args and args['stats_item_name'] in self.stats_data:
- stats_item_name = args['stats_item_name']
-
- logger.debug(DBG_STATS_MESSAGING, STATS_RECEIVED_REMOVE_COMMAND,
- stats_item_name)
-
- # just remove one item
- self.stats_data.pop(stats_item_name)
-
- return create_answer(0)
-
- def command_show(self, args, stats_item_name=''):
+ self.update_modules()
+ statistics_data = {}
+ for (name, module) in self.modules.items():
+ value = get_spec_defaults(module.get_statistics_spec())
+ if module.validate_statistics(True, value):
+ statistics_data[name] = value
+ for (name, value) in self.statistics_data.items():
+ if name in statistics_data:
+ statistics_data[name].update(value)
+ else:
+ statistics_data[name] = value
+ self.statistics_data = statistics_data
+ if owner and data:
+ errors = []
+ try:
+ if self.modules[owner].validate_statistics(False, data, errors):
+ self.statistics_data[owner].update(data)
+ return
+ except KeyError:
+ errors.append("unknown module name: " + str(owner))
+ return errors
+
+ def command_status(self):
"""
- handle show command
+ handle status command
"""
+ logger.debug(DBG_STATS_MESSAGING, STATS_RECEIVED_STATUS_COMMAND)
+ return isc.config.create_answer(
+ 0, "Stats is up. (PID " + str(os.getpid()) + ")")
- # always overwrite 'report_time' and 'stats.timestamp'
- # if "show" command invoked
- self.stats_data['report_time'] = get_datetime()
- self.stats_data['stats.timestamp'] = get_timestamp()
-
- # if with args
- if args and args['stats_item_name'] in self.stats_data:
- stats_item_name = args['stats_item_name']
- logger.debug(DBG_STATS_MESSAGING,
- STATS_RECEIVED_SHOW_NAME_COMMAND,
- stats_item_name)
- return create_answer(0, {stats_item_name: self.stats_data[stats_item_name]})
-
- logger.debug(DBG_STATS_MESSAGING,
- STATS_RECEIVED_SHOW_ALL_COMMAND)
- return create_answer(0, self.stats_data)
-
- def command_reset(self, args):
+ def command_shutdown(self):
"""
- handle reset command
+ handle shutdown command
"""
- logger.debug(DBG_STATS_MESSAGING,
- STATS_RECEIVED_RESET_COMMAND)
-
- # re-initialize internal variables
- self.stats_data = self.initialize_data(self.stats_spec)
-
- # reset initial value
- self.stats_data['stats.boot_time'] = self.boot_time
- self.stats_data['stats.start_time'] = get_datetime()
- self.stats_data['stats.last_update_time'] = get_datetime()
- self.stats_data['stats.lname'] = self.session.lname
-
- return create_answer(0)
+ logger.info(STATS_RECEIVED_SHUTDOWN_COMMAND)
+ self.running = False
+ return isc.config.create_answer(0)
- def command_status(self, args):
+ def command_show(self, owner=None, name=None):
"""
- handle status command
+ handle show command
"""
- logger.debug(DBG_STATS_MESSAGING, STATS_RECEIVED_STATUS_COMMAND)
- # just return "I'm alive."
- return create_answer(0, "I'm alive.")
-
- def command_unknown(self, command, args):
+ if owner or name:
+ logger.debug(DBG_STATS_MESSAGING,
+ STATS_RECEIVED_SHOW_NAME_COMMAND,
+ str(owner)+", "+str(name))
+ else:
+ logger.debug(DBG_STATS_MESSAGING,
+ STATS_RECEIVED_SHOW_ALL_COMMAND)
+ errors = self.update_statistics_data(
+ self.module_name,
+ timestamp=get_timestamp(),
+ report_time=get_datetime()
+ )
+ if errors:
+ raise StatsError("stats spec file is incorrect: "
+ + ", ".join(errors))
+ try:
+ return isc.config.create_answer(
+ 0, self.get_statistics_data(owner, name))
+ except StatsError:
+ return isc.config.create_answer(
+ 1, "specified arguments are incorrect: " \
+ + "owner: " + str(owner) + ", name: " + str(name))
+
+ def command_showschema(self, owner=None, name=None):
"""
- handle an unknown command
+ handle show command
"""
- logger.error(STATS_RECEIVED_UNKNOWN_COMMAND, command)
- return create_answer(1, "Unknown command: '"+str(command)+"'")
-
+ if owner or name:
+ logger.debug(DBG_STATS_MESSAGING,
+ STATS_RECEIVED_SHOWSCHEMA_NAME_COMMAND,
+ str(owner)+", "+str(name))
+ else:
+ logger.debug(DBG_STATS_MESSAGING,
+ STATS_RECEIVED_SHOWSCHEMA_ALL_COMMAND)
+ self.update_modules()
+ schema = {}
+ schema_byname = {}
+ for mod in self.modules:
+ spec = self.modules[mod].get_statistics_spec()
+ schema_byname[mod] = {}
+ if spec:
+ schema[mod] = spec
+ for item in spec:
+ schema_byname[mod][item['item_name']] = item
+ if owner:
+ try:
+ if name:
+ return isc.config.create_answer(0, schema_byname[owner][name])
+ else:
+ return isc.config.create_answer(0, schema[owner])
+ except KeyError:
+ pass
+ else:
+ if name:
+ return isc.config.create_answer(1, "module name is not specified")
+ else:
+ return isc.config.create_answer(0, schema)
+ return isc.config.create_answer(
+ 1, "specified arguments are incorrect: " \
+ + "owner: " + str(owner) + ", name: " + str(name))
- def initialize_data(self, spec):
+ def command_set(self, owner, data):
"""
- initialize stats data
+ handle set command
"""
- def __get_init_val(spec):
- if spec['item_type'] == 'null':
- return None
- elif spec['item_type'] == 'boolean':
- return bool(spec.get('item_default', False))
- elif spec['item_type'] == 'string':
- return str(spec.get('item_default', ''))
- elif spec['item_type'] in set(['number', 'integer']):
- return int(spec.get('item_default', 0))
- elif spec['item_type'] in set(['float', 'double', 'real']):
- return float(spec.get('item_default', 0.0))
- elif spec['item_type'] in set(['list', 'array']):
- return spec.get('item_default',
- [ __get_init_val(s) for s in spec['list_item_spec'] ])
- elif spec['item_type'] in set(['map', 'object']):
- return spec.get('item_default',
- dict([ (s['item_name'], __get_init_val(s)) for s in spec['map_item_spec'] ]) )
- else:
- return spec.get('item_default')
- return dict([ (s['item_name'], __get_init_val(s)) for s in spec ])
+ errors = self.update_statistics_data(owner, **data)
+ if errors:
+ return isc.config.create_answer(
+ 1, "errors while setting statistics data: " \
+ + ", ".join(errors))
+ errors = self.update_statistics_data(
+ self.module_name, last_update_time=get_datetime() )
+ if errors:
+ raise StatsError("stats spec file is incorrect: "
+ + ", ".join(errors))
+ return isc.config.create_answer(0)
-def get_timestamp():
- """
- get current timestamp
- """
- return time()
-
-def get_datetime():
- """
- get current datetime
- """
- return strftime("%Y-%m-%dT%H:%M:%SZ", gmtime())
-
-def main(session=None):
+if __name__ == "__main__":
try:
parser = OptionParser()
- parser.add_option("-v", "--verbose", dest="verbose", action="store_true",
- help="display more about what is going on")
+ parser.add_option(
+ "-v", "--verbose", dest="verbose", action="store_true",
+ help="display more about what is going on")
(options, args) = parser.parse_args()
if options.verbose:
isc.log.init("b10-stats", "DEBUG", 99)
- subject = SessionSubject(session=session)
- listener = CCSessionListener(subject)
- subject.start()
- while subject.running:
- subject.check()
- subject.stop()
-
+ stats = Stats()
+ stats.start()
except OptionValueError as ove:
logger.fatal(STATS_BAD_OPTION_VALUE, ove)
- except SessionError as se:
+ sys.exit(1)
+ except isc.cc.session.SessionError as se:
logger.fatal(STATS_CC_SESSION_ERROR, se)
+ sys.exit(1)
+ except StatsError as se:
+ logger.fatal(STATS_START_ERROR, se)
+ sys.exit(1)
except KeyboardInterrupt as kie:
logger.info(STATS_STOPPED_BY_KEYBOARD)
-
-if __name__ == "__main__":
- main()
diff --git a/src/bin/stats/stats.spec b/src/bin/stats/stats.spec
index 635eb48..e716b62 100644
--- a/src/bin/stats/stats.spec
+++ b/src/bin/stats/stats.spec
@@ -6,55 +6,74 @@
"commands": [
{
"command_name": "status",
- "command_description": "identify whether stats module is alive or not",
+ "command_description": "Show status of the stats daemon",
+ "command_args": []
+ },
+ {
+ "command_name": "shutdown",
+ "command_description": "Shut down the stats module",
"command_args": []
},
{
"command_name": "show",
- "command_description": "show the specified/all statistics data",
+ "command_description": "Show the specified/all statistics data",
"command_args": [
{
- "item_name": "stats_item_name",
+ "item_name": "owner",
+ "item_type": "string",
+ "item_optional": true,
+ "item_default": "",
+ "item_description": "module name of the owner of the statistics data"
+ },
+ {
+ "item_name": "name",
"item_type": "string",
"item_optional": true,
- "item_default": ""
+ "item_default": "",
+ "item_description": "statistics item name of the owner"
}
]
},
{
- "command_name": "set",
- "command_description": "set the value of specified name in statistics data",
+ "command_name": "showschema",
+ "command_description": "show the specified/all statistics shema",
"command_args": [
{
- "item_name": "stats_data",
- "item_type": "map",
- "item_optional": false,
- "item_default": {},
- "map_item_spec": []
+ "item_name": "owner",
+ "item_type": "string",
+ "item_optional": true,
+ "item_default": "",
+ "item_description": "module name of the owner of the statistics data"
+ },
+ {
+ "item_name": "name",
+ "item_type": "string",
+ "item_optional": true,
+ "item_default": "",
+ "item_description": "statistics item name of the owner"
}
]
},
{
- "command_name": "remove",
- "command_description": "remove the specified name from statistics data",
+ "command_name": "set",
+ "command_description": "set the value of specified name in statistics data",
"command_args": [
{
- "item_name": "stats_item_name",
+ "item_name": "owner",
"item_type": "string",
"item_optional": false,
- "item_default": ""
+ "item_default": "",
+ "item_description": "module name of the owner of the statistics data"
+ },
+ {
+ "item_name": "data",
+ "item_type": "map",
+ "item_optional": false,
+ "item_default": {},
+ "item_description": "statistics data set of the owner",
+ "map_item_spec": []
}
]
- },
- {
- "command_name": "reset",
- "command_description": "reset all statistics data to default values except for several constant names",
- "command_args": []
- },
- {
- "command_name": "shutdown",
- "command_description": "Shut down the stats module",
- "command_args": []
}
],
"statistics": [
@@ -100,7 +119,7 @@
"item_default": "",
"item_title": "Local Name",
"item_description": "A localname of stats module given via CC protocol"
- }
+ }
]
}
}
diff --git a/src/bin/stats/stats_httpd.py.in b/src/bin/stats/stats_httpd.py.in
old mode 100755
new mode 100644
index 74298cf..596870a
--- a/src/bin/stats/stats_httpd.py.in
+++ b/src/bin/stats/stats_httpd.py.in
@@ -35,7 +35,7 @@ import isc.config
import isc.util.process
import isc.log
-from stats_httpd_messages import *
+from isc.log_messages.stats_httpd_messages import *
isc.log.init("b10-stats-httpd")
logger = isc.log.Logger("stats-httpd")
@@ -57,7 +57,6 @@ else:
BASE_LOCATION = "@datadir@" + os.sep + "@PACKAGE@"
BASE_LOCATION = BASE_LOCATION.replace("${datarootdir}", DATAROOTDIR).replace("${prefix}", PREFIX)
SPECFILE_LOCATION = BASE_LOCATION + os.sep + "stats-httpd.spec"
-SCHEMA_SPECFILE_LOCATION = BASE_LOCATION + os.sep + "stats-schema.spec"
XML_TEMPLATE_LOCATION = BASE_LOCATION + os.sep + "stats-httpd-xml.tpl"
XSD_TEMPLATE_LOCATION = BASE_LOCATION + os.sep + "stats-httpd-xsd.tpl"
XSL_TEMPLATE_LOCATION = BASE_LOCATION + os.sep + "stats-httpd-xsl.tpl"
@@ -69,7 +68,6 @@ XSD_URL_PATH = '/bind10/statistics/xsd'
XSL_URL_PATH = '/bind10/statistics/xsl'
# TODO: This should be considered later.
XSD_NAMESPACE = 'http://bind10.isc.org' + XSD_URL_PATH
-DEFAULT_CONFIG = dict(listen_on=[('127.0.0.1', 8000)])
# Assign this process name
isc.util.process.rename()
@@ -160,8 +158,10 @@ class StatsHttpd:
self.mccs = None
self.httpd = []
self.open_mccs()
+ self.config = {}
self.load_config()
- self.load_templates()
+ self.http_addrs = []
+ self.mccs.start()
self.open_httpd()
def open_mccs(self):
@@ -171,10 +171,6 @@ class StatsHttpd:
self.mccs = isc.config.ModuleCCSession(
SPECFILE_LOCATION, self.config_handler, self.command_handler)
self.cc_session = self.mccs._session
- # read spec file of stats module and subscribe 'Stats'
- self.stats_module_spec = isc.config.module_spec_from_file(SCHEMA_SPECFILE_LOCATION)
- self.stats_config_spec = self.stats_module_spec.get_config_spec()
- self.stats_module_name = self.stats_module_spec.get_module_name()
def close_mccs(self):
"""Closes a ModuleCCSession object"""
@@ -189,18 +185,19 @@ class StatsHttpd:
"""Loads configuration from spec file or new configuration
from the config manager"""
# load config
- if len(new_config) > 0:
- self.config.update(new_config)
- else:
- self.config = DEFAULT_CONFIG
- self.config.update(
- dict([
- (itm['item_name'], self.mccs.get_value(itm['item_name'])[0])
- for itm in self.mccs.get_module_spec().get_config_spec()
- ])
- )
+ if len(self.config) == 0:
+ self.config = dict([
+ (itm['item_name'], self.mccs.get_value(itm['item_name'])[0])
+ for itm in self.mccs.get_module_spec().get_config_spec()
+ ])
+ self.config.update(new_config)
# set addresses and ports for HTTP
- self.http_addrs = [ (cf['address'], cf['port']) for cf in self.config['listen_on'] ]
+ addrs = []
+ if 'listen_on' in self.config:
+ for cf in self.config['listen_on']:
+ if 'address' in cf and 'port' in cf:
+ addrs.append((cf['address'], cf['port']))
+ self.http_addrs = addrs
def open_httpd(self):
"""Opens sockets for HTTP. Iterating each HTTP address to be
@@ -208,46 +205,44 @@ class StatsHttpd:
for addr in self.http_addrs:
self.httpd.append(self._open_httpd(addr))
- def _open_httpd(self, server_address, address_family=None):
+ def _open_httpd(self, server_address):
+ httpd = None
try:
- # try IPv6 at first
- if address_family is not None:
- HttpServer.address_family = address_family
- elif socket.has_ipv6:
- HttpServer.address_family = socket.AF_INET6
+ # get address family for the server_address before
+ # creating HttpServer object. If a specified address is
+ # not numerical, gaierror may be thrown.
+ address_family = socket.getaddrinfo(
+ server_address[0], server_address[1], 0,
+ socket.SOCK_STREAM, socket.IPPROTO_TCP, socket.AI_NUMERICHOST
+ )[0][0]
+ HttpServer.address_family = address_family
httpd = HttpServer(
server_address, HttpHandler,
self.xml_handler, self.xsd_handler, self.xsl_handler,
self.write_log)
- except (socket.gaierror, socket.error,
- OverflowError, TypeError) as err:
- # try IPv4 next
- if HttpServer.address_family == socket.AF_INET6:
- httpd = self._open_httpd(server_address, socket.AF_INET)
- else:
- raise HttpServerError(
- "Invalid address %s, port %s: %s: %s" %
- (server_address[0], server_address[1],
- err.__class__.__name__, err))
- else:
logger.info(STATHTTPD_STARTED, server_address[0],
server_address[1])
- return httpd
+ return httpd
+ except (socket.gaierror, socket.error,
+ OverflowError, TypeError) as err:
+ if httpd:
+ httpd.server_close()
+ raise HttpServerError(
+ "Invalid address %s, port %s: %s: %s" %
+ (server_address[0], server_address[1],
+ err.__class__.__name__, err))
def close_httpd(self):
"""Closes sockets for HTTP"""
- if len(self.httpd) == 0:
- return
- for ht in self.httpd:
+ while len(self.httpd)>0:
+ ht = self.httpd.pop()
logger.info(STATHTTPD_CLOSING, ht.server_address[0],
ht.server_address[1])
ht.server_close()
- self.httpd = []
def start(self):
"""Starts StatsHttpd objects to run. Waiting for client
requests by using select.select functions"""
- self.mccs.start()
self.running = True
while self.running:
try:
@@ -280,6 +275,7 @@ class StatsHttpd:
logger.info(STATHTTPD_SHUTDOWN)
self.close_httpd()
self.close_mccs()
+ self.running = False
def get_sockets(self):
"""Returns sockets to select.select"""
@@ -296,23 +292,27 @@ class StatsHttpd:
addresses and ports to listen HTTP requests on."""
logger.debug(DBG_STATHTTPD_MESSAGING, STATHTTPD_HANDLE_CONFIG,
new_config)
- for key in new_config.keys():
- if key not in DEFAULT_CONFIG and key != "version":
- logger.error(STATHTTPD_UNKNOWN_CONFIG_ITEM, key)
+ errors = []
+ if not self.mccs.get_module_spec().\
+ validate_config(False, new_config, errors):
return isc.config.ccsession.create_answer(
- 1, "Unknown known config: %s" % key)
+ 1, ", ".join(errors))
# backup old config
old_config = self.config.copy()
- self.close_httpd()
self.load_config(new_config)
+ # If the http sockets aren't opened or
+ # if new_config doesn't have'listen_on', it returns
+ if len(self.httpd) == 0 or 'listen_on' not in new_config:
+ return isc.config.ccsession.create_answer(0)
+ self.close_httpd()
try:
self.open_httpd()
except HttpServerError as err:
logger.error(STATHTTPD_SERVER_ERROR, err)
# restore old config
- self.config_handler(old_config)
- return isc.config.ccsession.create_answer(
- 1, "[b10-stats-httpd] %s" % err)
+ self.load_config(old_config)
+ self.open_httpd()
+ return isc.config.ccsession.create_answer(1, str(err))
else:
return isc.config.ccsession.create_answer(0)
@@ -328,8 +328,7 @@ class StatsHttpd:
logger.debug(DBG_STATHTTPD_MESSAGING,
STATHTTPD_RECEIVED_SHUTDOWN_COMMAND)
self.running = False
- return isc.config.ccsession.create_answer(
- 0, "Stats Httpd is shutting down.")
+ return isc.config.ccsession.create_answer(0)
else:
logger.debug(DBG_STATHTTPD_MESSAGING,
STATHTTPD_RECEIVED_UNKNOWN_COMMAND, command)
@@ -341,8 +340,7 @@ class StatsHttpd:
the data which obtains from it"""
try:
seq = self.cc_session.group_sendmsg(
- isc.config.ccsession.create_command('show'),
- self.stats_module_name)
+ isc.config.ccsession.create_command('show'), 'Stats')
(answer, env) = self.cc_session.group_recvmsg(False, seq)
if answer:
(rcode, value) = isc.config.ccsession.parse_answer(answer)
@@ -357,34 +355,82 @@ class StatsHttpd:
raise StatsHttpdError("Stats module: %s" % str(value))
def get_stats_spec(self):
- """Just returns spec data"""
- return self.stats_config_spec
-
- def load_templates(self):
- """Setup the bodies of XSD and XSL documents to be responds to
- HTTP clients. Before that it also creates XML tag structures by
- using xml.etree.ElementTree.Element class and substitutes
- concrete strings with parameters embed in the string.Template
- object."""
+ """Requests statistics data to the Stats daemon and returns
+ the data which obtains from it"""
+ try:
+ seq = self.cc_session.group_sendmsg(
+ isc.config.ccsession.create_command('showschema'), 'Stats')
+ (answer, env) = self.cc_session.group_recvmsg(False, seq)
+ if answer:
+ (rcode, value) = isc.config.ccsession.parse_answer(answer)
+ if rcode == 0:
+ return value
+ else:
+ raise StatsHttpdError("Stats module: %s" % str(value))
+ except (isc.cc.session.SessionTimeout,
+ isc.cc.session.SessionError) as err:
+ raise StatsHttpdError("%s: %s" %
+ (err.__class__.__name__, err))
+
+ def xml_handler(self):
+ """Handler which requests to Stats daemon to obtain statistics
+ data and returns the body of XML document"""
+ xml_list=[]
+ for (mod, spec) in self.get_stats_data().items():
+ if not spec: continue
+ elem1 = xml.etree.ElementTree.Element(str(mod))
+ for (k, v) in spec.items():
+ elem2 = xml.etree.ElementTree.Element(str(k))
+ elem2.text = str(v)
+ elem1.append(elem2)
+ # The coding conversion is tricky. xml..tostring() of Python 3.2
+ # returns bytes (not string) regardless of the coding, while
+ # tostring() of Python 3.1 returns a string. To support both
+ # cases transparently, we first make sure tostring() returns
+ # bytes by specifying utf-8 and then convert the result to a
+ # plain string (code below assume it).
+ xml_list.append(
+ str(xml.etree.ElementTree.tostring(elem1, encoding='utf-8'),
+ encoding='us-ascii'))
+ xml_string = "".join(xml_list)
+ self.xml_body = self.open_template(XML_TEMPLATE_LOCATION).substitute(
+ xml_string=xml_string,
+ xsd_namespace=XSD_NAMESPACE,
+ xsd_url_path=XSD_URL_PATH,
+ xsl_url_path=XSL_URL_PATH)
+ assert self.xml_body is not None
+ return self.xml_body
+
+ def xsd_handler(self):
+ """Handler which just returns the body of XSD document"""
# for XSD
xsd_root = xml.etree.ElementTree.Element("all") # started with "all" tag
- for item in self.get_stats_spec():
- element = xml.etree.ElementTree.Element(
- "element",
- dict( name=item["item_name"],
- type=item["item_type"] if item["item_type"].lower() != 'real' else 'float',
- minOccurs="1",
- maxOccurs="1" ),
- )
- annotation = xml.etree.ElementTree.Element("annotation")
- appinfo = xml.etree.ElementTree.Element("appinfo")
- documentation = xml.etree.ElementTree.Element("documentation")
- appinfo.text = item["item_title"]
- documentation.text = item["item_description"]
- annotation.append(appinfo)
- annotation.append(documentation)
- element.append(annotation)
- xsd_root.append(element)
+ for (mod, spec) in self.get_stats_spec().items():
+ if not spec: continue
+ alltag = xml.etree.ElementTree.Element("all")
+ for item in spec:
+ element = xml.etree.ElementTree.Element(
+ "element",
+ dict( name=item["item_name"],
+ type=item["item_type"] if item["item_type"].lower() != 'real' else 'float',
+ minOccurs="1",
+ maxOccurs="1" ),
+ )
+ annotation = xml.etree.ElementTree.Element("annotation")
+ appinfo = xml.etree.ElementTree.Element("appinfo")
+ documentation = xml.etree.ElementTree.Element("documentation")
+ appinfo.text = item["item_title"]
+ documentation.text = item["item_description"]
+ annotation.append(appinfo)
+ annotation.append(documentation)
+ element.append(annotation)
+ alltag.append(element)
+
+ complextype = xml.etree.ElementTree.Element("complexType")
+ complextype.append(alltag)
+ mod_element = xml.etree.ElementTree.Element("element", { "name" : mod })
+ mod_element.append(complextype)
+ xsd_root.append(mod_element)
# The coding conversion is tricky. xml..tostring() of Python 3.2
# returns bytes (not string) regardless of the coding, while
# tostring() of Python 3.1 returns a string. To support both
@@ -398,25 +444,33 @@ class StatsHttpd:
xsd_namespace=XSD_NAMESPACE
)
assert self.xsd_body is not None
+ return self.xsd_body
+ def xsl_handler(self):
+ """Handler which just returns the body of XSL document"""
# for XSL
xsd_root = xml.etree.ElementTree.Element(
"xsl:template",
dict(match="*")) # started with xml:template tag
- for item in self.get_stats_spec():
- tr = xml.etree.ElementTree.Element("tr")
- td1 = xml.etree.ElementTree.Element(
- "td", { "class" : "title",
- "title" : item["item_description"] })
- td1.text = item["item_title"]
- td2 = xml.etree.ElementTree.Element("td")
- xsl_valueof = xml.etree.ElementTree.Element(
- "xsl:value-of",
- dict(select=item["item_name"]))
- td2.append(xsl_valueof)
- tr.append(td1)
- tr.append(td2)
- xsd_root.append(tr)
+ for (mod, spec) in self.get_stats_spec().items():
+ if not spec: continue
+ for item in spec:
+ tr = xml.etree.ElementTree.Element("tr")
+ td0 = xml.etree.ElementTree.Element("td")
+ td0.text = str(mod)
+ td1 = xml.etree.ElementTree.Element(
+ "td", { "class" : "title",
+ "title" : item["item_description"] })
+ td1.text = item["item_title"]
+ td2 = xml.etree.ElementTree.Element("td")
+ xsl_valueof = xml.etree.ElementTree.Element(
+ "xsl:value-of",
+ dict(select=mod+'/'+item["item_name"]))
+ td2.append(xsl_valueof)
+ tr.append(td0)
+ tr.append(td1)
+ tr.append(td2)
+ xsd_root.append(tr)
# The coding conversion is tricky. xml..tostring() of Python 3.2
# returns bytes (not string) regardless of the coding, while
# tostring() of Python 3.1 returns a string. To support both
@@ -429,47 +483,15 @@ class StatsHttpd:
xsl_string=xsl_string,
xsd_namespace=XSD_NAMESPACE)
assert self.xsl_body is not None
-
- def xml_handler(self):
- """Handler which requests to Stats daemon to obtain statistics
- data and returns the body of XML document"""
- xml_list=[]
- for (k, v) in self.get_stats_data().items():
- (k, v) = (str(k), str(v))
- elem = xml.etree.ElementTree.Element(k)
- elem.text = v
- # The coding conversion is tricky. xml..tostring() of Python 3.2
- # returns bytes (not string) regardless of the coding, while
- # tostring() of Python 3.1 returns a string. To support both
- # cases transparently, we first make sure tostring() returns
- # bytes by specifying utf-8 and then convert the result to a
- # plain string (code below assume it).
- xml_list.append(
- str(xml.etree.ElementTree.tostring(elem, encoding='utf-8'),
- encoding='us-ascii'))
- xml_string = "".join(xml_list)
- self.xml_body = self.open_template(XML_TEMPLATE_LOCATION).substitute(
- xml_string=xml_string,
- xsd_namespace=XSD_NAMESPACE,
- xsd_url_path=XSD_URL_PATH,
- xsl_url_path=XSL_URL_PATH)
- assert self.xml_body is not None
- return self.xml_body
-
- def xsd_handler(self):
- """Handler which just returns the body of XSD document"""
- return self.xsd_body
-
- def xsl_handler(self):
- """Handler which just returns the body of XSL document"""
return self.xsl_body
def open_template(self, file_name):
"""It opens a template file, and it loads all lines to a
string variable and returns string. Template object includes
the variable. Limitation of a file size isn't needed there."""
- lines = "".join(
- open(file_name, 'r').readlines())
+ f = open(file_name, 'r')
+ lines = "".join(f.readlines())
+ f.close()
assert lines is not None
return string.Template(lines)
@@ -491,7 +513,7 @@ if __name__ == "__main__":
logger.fatal(STATHTTPD_CC_SESSION_ERROR, se)
sys.exit(1)
except HttpServerError as hse:
- logger.fatal(STATHTTPD_START_SERVER_ERROR, hse)
+ logger.fatal(STATHTTPD_START_SERVER_INIT_ERROR, hse)
sys.exit(1)
except KeyboardInterrupt as kie:
logger.info(STATHTTPD_STOPPED_BY_KEYBOARD)
diff --git a/src/bin/stats/stats_messages.mes b/src/bin/stats/stats_messages.mes
index 9ad07cf..cfffb3a 100644
--- a/src/bin/stats/stats_messages.mes
+++ b/src/bin/stats/stats_messages.mes
@@ -28,16 +28,6 @@ control bus. A likely problem is that the message bus daemon
This debug message is printed when the stats module has received a
configuration update from the configuration manager.
-% STATS_RECEIVED_REMOVE_COMMAND received command to remove %1
-A remove command for the given name was sent to the stats module, and
-the given statistics value will now be removed. It will not appear in
-statistics reports until it appears in a statistics update from a
-module again.
-
-% STATS_RECEIVED_RESET_COMMAND received command to reset all statistics
-The stats module received a command to clear all collected statistics.
-The data is cleared until it receives an update from the modules again.
-
% STATS_RECEIVED_SHOW_ALL_COMMAND received command to show all statistics
The stats module received a command to show all statistics that it has
collected.
@@ -72,4 +62,15 @@ installation problem, where the specification file stats.spec is
from a different version of BIND 10 than the stats module itself.
Please check your installation.
+% STATS_STARTING starting
+The stats module will be now starting.
+
+% STATS_RECEIVED_SHOWSCHEMA_ALL_COMMAND received command to show all statistics schema
+The stats module received a command to show all statistics schemas of all modules.
+
+% STATS_RECEIVED_SHOWSCHEMA_NAME_COMMAND received command to show statistics schema for %1
+The stats module received a command to show the specified statistics schema of the specified module.
+% STATS_START_ERROR stats module error: %1
+An internal error occurred while starting the stats module. The stats
+module will be now shutting down.
diff --git a/src/bin/stats/tests/Makefile.am b/src/bin/stats/tests/Makefile.am
index dad6c48..b5edc59 100644
--- a/src/bin/stats/tests/Makefile.am
+++ b/src/bin/stats/tests/Makefile.am
@@ -1,28 +1,28 @@
-SUBDIRS = isc http testdata
PYCOVERAGE_RUN = @PYCOVERAGE_RUN@
PYTESTS = b10-stats_test.py b10-stats-httpd_test.py
-EXTRA_DIST = $(PYTESTS) fake_time.py fake_socket.py fake_select.py
-CLEANFILES = fake_time.pyc fake_socket.pyc fake_select.pyc
+EXTRA_DIST = $(PYTESTS) test_utils.py
+CLEANFILES = test_utils.pyc
# If necessary (rare cases), explicitly specify paths to dynamic libraries
# required by loadable python modules.
LIBRARY_PATH_PLACEHOLDER =
if SET_ENV_LIBRARY_PATH
-LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$$$(ENV_LIBRARY_PATH)
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
endif
# test using command-line arguments, so use check-local target instead of TESTS
check-local:
if ENABLE_PYTHON_COVERAGE
- touch $(abs_top_srcdir)/.coverage
+ touch $(abs_top_srcdir)/.coverage
rm -f .coverage
${LN_S} $(abs_top_srcdir)/.coverage .coverage
endif
for pytest in $(PYTESTS) ; do \
echo Running test: $$pytest ; \
$(LIBRARY_PATH_PLACEHOLDER) \
- env PYTHONPATH=$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python:$(abs_top_builddir)/src/bin/stats:$(abs_top_builddir)/src/bin/stats/tests \
+ PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_builddir)/src/bin/stats:$(abs_top_builddir)/src/bin/stats/tests:$(abs_top_builddir)/src/bin/msgq:$(abs_top_builddir)/src/lib/python/isc/config \
B10_FROM_SOURCE=$(abs_top_srcdir) \
+ CONFIG_TESTDATA_PATH=$(abs_top_srcdir)/src/lib/config/tests/testdata \
$(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
done
diff --git a/src/bin/stats/tests/b10-stats-httpd_test.py b/src/bin/stats/tests/b10-stats-httpd_test.py
index 6d72dc2..e867080 100644
--- a/src/bin/stats/tests/b10-stats-httpd_test.py
+++ b/src/bin/stats/tests/b10-stats-httpd_test.py
@@ -13,147 +13,269 @@
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+"""
+In each of these tests we start several virtual components. They are
+not the real components, no external processes are started. They are
+just simple mock objects running each in its own thread and pretending
+to be bind10 modules. This helps testing the stats http server in a
+close to real environment.
+"""
+
import unittest
import os
-import http.server
-import string
-import fake_select
import imp
-import sys
-import fake_socket
-
-import isc.cc
+import socket
+import errno
+import select
+import string
+import time
+import threading
+import http.client
+import xml.etree.ElementTree
+import random
+import isc
import stats_httpd
-stats_httpd.socket = fake_socket
-stats_httpd.select = fake_select
+import stats
+from test_utils import BaseModules, ThreadingServerManager, MyStats, MyStatsHttpd, SignalHandler, send_command, send_shutdown
DUMMY_DATA = {
- "auth.queries.tcp": 10000,
- "auth.queries.udp": 12000,
- "bind10.boot_time": "2011-03-04T11:59:05Z",
- "report_time": "2011-03-04T11:59:19Z",
- "stats.boot_time": "2011-03-04T11:59:06Z",
- "stats.last_update_time": "2011-03-04T11:59:07Z",
- "stats.lname": "4d70d40a_c at host",
- "stats.start_time": "2011-03-04T11:59:06Z",
- "stats.timestamp": 1299239959.560846
+ 'Boss' : {
+ "boot_time": "2011-03-04T11:59:06Z"
+ },
+ 'Auth' : {
+ "queries.tcp": 2,
+ "queries.udp": 3
+ },
+ 'Stats' : {
+ "report_time": "2011-03-04T11:59:19Z",
+ "boot_time": "2011-03-04T11:59:06Z",
+ "last_update_time": "2011-03-04T11:59:07Z",
+ "lname": "4d70d40a_c at host",
+ "timestamp": 1299239959.560846
+ }
}
-def push_answer(stats_httpd):
- stats_httpd.cc_session.group_sendmsg(
- { 'result':
- [ 0, DUMMY_DATA ] }, "Stats")
-
-def pull_query(stats_httpd):
- (msg, env) = stats_httpd.cc_session.group_recvmsg()
- if 'result' in msg:
- (ret, arg) = isc.config.ccsession.parse_answer(msg)
- else:
- (ret, arg) = isc.config.ccsession.parse_command(msg)
- return (ret, arg, env)
+def get_availaddr(address='127.0.0.1', port=8001):
+ """returns a tuple of address and port which is available to
+ listen on the platform. The first argument is a address for
+ search. The second argument is a port for search. If a set of
+ address and port is failed on the search for the availability, the
+ port number is increased and it goes on the next trial until the
+ available set of address and port is looked up. If the port number
+ reaches over 65535, it may stop the search and raise a
+ OverflowError exception."""
+ while True:
+ for addr in socket.getaddrinfo(
+ address, port, 0,
+ socket.SOCK_STREAM, socket.IPPROTO_TCP):
+ sock = socket.socket(addr[0], socket.SOCK_STREAM)
+ try:
+ sock.bind((address, port))
+ return (address, port)
+ except socket.error:
+ continue
+ finally:
+ if sock: sock.close()
+ # This address and port number are already in use.
+ # next port number is added
+ port = port + 1
+
+def is_ipv6_enabled(address='::1', port=8001):
+ """checks IPv6 enabled on the platform. address for check is '::1'
+ and port for check is random number between 8001 and
+ 65535. Retrying is 3 times even if it fails. The built-in socket
+ module provides a 'has_ipv6' parameter, but it's not used here
+ because there may be a situation where the value is True on an
+ environment where the IPv6 config is disabled."""
+ for p in random.sample(range(port, 65535), 3):
+ try:
+ sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
+ sock.bind((address, p))
+ return True
+ except socket.error:
+ continue
+ finally:
+ if sock: sock.close()
+ return False
class TestHttpHandler(unittest.TestCase):
"""Tests for HttpHandler class"""
-
def setUp(self):
- self.stats_httpd = stats_httpd.StatsHttpd()
- self.assertTrue(type(self.stats_httpd.httpd) is list)
- self.httpd = self.stats_httpd.httpd
+ # set the signal handler for deadlock
+ self.sig_handler = SignalHandler(self.fail)
+ self.base = BaseModules()
+ self.stats_server = ThreadingServerManager(MyStats)
+ self.stats = self.stats_server.server
+ self.stats_server.run()
+ (self.address, self.port) = get_availaddr()
+ self.stats_httpd_server = ThreadingServerManager(MyStatsHttpd, (self.address, self.port))
+ self.stats_httpd = self.stats_httpd_server.server
+ self.stats_httpd_server.run()
+ self.client = http.client.HTTPConnection(self.address, self.port)
+ self.client._http_vsn_str = 'HTTP/1.0\n'
+ self.client.connect()
- def test_do_GET(self):
- for ht in self.httpd:
- self._test_do_GET(ht._handler)
+ def tearDown(self):
+ self.client.close()
+ self.stats_httpd_server.shutdown()
+ self.stats_server.shutdown()
+ self.base.shutdown()
+ # reset the signal handler
+ self.sig_handler.reset()
- def _test_do_GET(self, handler):
+ def test_do_GET(self):
+ self.assertTrue(type(self.stats_httpd.httpd) is list)
+ self.assertEqual(len(self.stats_httpd.httpd), 1)
+ self.assertEqual((self.address, self.port), self.stats_httpd.http_addrs[0])
# URL is '/bind10/statistics/xml'
- handler.path = stats_httpd.XML_URL_PATH
- push_answer(self.stats_httpd)
- handler.do_GET()
- (ret, arg, env) = pull_query(self.stats_httpd)
- self.assertEqual(ret, "show")
- self.assertIsNone(arg)
- self.assertTrue('group' in env)
- self.assertEqual(env['group'], 'Stats')
- self.assertEqual(handler.response.code, 200)
- self.assertEqual(handler.response.headers["Content-type"], "text/xml")
- self.assertTrue(handler.response.headers["Content-Length"] > 0)
- self.assertTrue(handler.response.wrote_headers)
- self.assertTrue(handler.response.body.find(stats_httpd.XSD_NAMESPACE)>0)
- self.assertTrue(handler.response.body.find(stats_httpd.XSD_URL_PATH)>0)
- for (k, v) in DUMMY_DATA.items():
- self.assertTrue(handler.response.body.find(str(k))>0)
- self.assertTrue(handler.response.body.find(str(v))>0)
+ self.client.putrequest('GET', stats_httpd.XML_URL_PATH)
+ self.client.endheaders()
+ response = self.client.getresponse()
+ self.assertEqual(response.getheader("Content-type"), "text/xml")
+ self.assertTrue(int(response.getheader("Content-Length")) > 0)
+ self.assertEqual(response.status, 200)
+ root = xml.etree.ElementTree.parse(response).getroot()
+ self.assertTrue(root.tag.find('stats_data') > 0)
+ for (k,v) in root.attrib.items():
+ if k.find('schemaLocation') > 0:
+ self.assertEqual(v, stats_httpd.XSD_NAMESPACE + ' ' + stats_httpd.XSD_URL_PATH)
+ for mod in DUMMY_DATA:
+ for (item, value) in DUMMY_DATA[mod].items():
+ self.assertIsNotNone(root.find(mod + '/' + item))
# URL is '/bind10/statitics/xsd'
- handler.path = stats_httpd.XSD_URL_PATH
- handler.do_GET()
- self.assertEqual(handler.response.code, 200)
- self.assertEqual(handler.response.headers["Content-type"], "text/xml")
- self.assertTrue(handler.response.headers["Content-Length"] > 0)
- self.assertTrue(handler.response.wrote_headers)
- self.assertTrue(handler.response.body.find(stats_httpd.XSD_NAMESPACE)>0)
- for (k, v) in DUMMY_DATA.items():
- self.assertTrue(handler.response.body.find(str(k))>0)
+ self.client.putrequest('GET', stats_httpd.XSD_URL_PATH)
+ self.client.endheaders()
+ response = self.client.getresponse()
+ self.assertEqual(response.getheader("Content-type"), "text/xml")
+ self.assertTrue(int(response.getheader("Content-Length")) > 0)
+ self.assertEqual(response.status, 200)
+ root = xml.etree.ElementTree.parse(response).getroot()
+ url_xmlschema = '{http://www.w3.org/2001/XMLSchema}'
+ tags = [ url_xmlschema + t for t in [ 'element', 'complexType', 'all', 'element' ] ]
+ xsdpath = '/'.join(tags)
+ self.assertTrue(root.tag.find('schema') > 0)
+ self.assertTrue(hasattr(root, 'attrib'))
+ self.assertTrue('targetNamespace' in root.attrib)
+ self.assertEqual(root.attrib['targetNamespace'],
+ stats_httpd.XSD_NAMESPACE)
+ for elm in root.findall(xsdpath):
+ self.assertIsNotNone(elm.attrib['name'])
+ self.assertTrue(elm.attrib['name'] in DUMMY_DATA)
# URL is '/bind10/statitics/xsl'
- handler.path = stats_httpd.XSL_URL_PATH
- handler.do_GET()
- self.assertEqual(handler.response.code, 200)
- self.assertEqual(handler.response.headers["Content-type"], "text/xml")
- self.assertTrue(handler.response.headers["Content-Length"] > 0)
- self.assertTrue(handler.response.wrote_headers)
- self.assertTrue(handler.response.body.find(stats_httpd.XSD_NAMESPACE)>0)
- for (k, v) in DUMMY_DATA.items():
- self.assertTrue(handler.response.body.find(str(k))>0)
+ self.client.putrequest('GET', stats_httpd.XSL_URL_PATH)
+ self.client.endheaders()
+ response = self.client.getresponse()
+ self.assertEqual(response.getheader("Content-type"), "text/xml")
+ self.assertTrue(int(response.getheader("Content-Length")) > 0)
+ self.assertEqual(response.status, 200)
+ root = xml.etree.ElementTree.parse(response).getroot()
+ url_trans = '{http://www.w3.org/1999/XSL/Transform}'
+ url_xhtml = '{http://www.w3.org/1999/xhtml}'
+ xslpath = url_trans + 'template/' + url_xhtml + 'tr'
+ self.assertEqual(root.tag, url_trans + 'stylesheet')
+ for tr in root.findall(xslpath):
+ tds = tr.findall(url_xhtml + 'td')
+ self.assertIsNotNone(tds)
+ self.assertEqual(type(tds), list)
+ self.assertTrue(len(tds) > 2)
+ self.assertTrue(hasattr(tds[0], 'text'))
+ self.assertTrue(tds[0].text in DUMMY_DATA)
+ valueof = tds[2].find(url_trans + 'value-of')
+ self.assertIsNotNone(valueof)
+ self.assertTrue(hasattr(valueof, 'attrib'))
+ self.assertIsNotNone(valueof.attrib)
+ self.assertTrue('select' in valueof.attrib)
+ self.assertTrue(valueof.attrib['select'] in \
+ [ tds[0].text+'/'+item for item in DUMMY_DATA[tds[0].text].keys() ])
# 302 redirect
- handler.path = '/'
- handler.headers = {'Host': 'my.host.domain'}
- handler.do_GET()
- self.assertEqual(handler.response.code, 302)
- self.assertEqual(handler.response.headers["Location"],
- "http://my.host.domain%s" % stats_httpd.XML_URL_PATH)
+ self.client._http_vsn_str = 'HTTP/1.1'
+ self.client.putrequest('GET', '/')
+ self.client.putheader('Host', self.address)
+ self.client.endheaders()
+ response = self.client.getresponse()
+ self.assertEqual(response.status, 302)
+ self.assertEqual(response.getheader('Location'),
+ "http://%s:%d%s" % (self.address, self.port, stats_httpd.XML_URL_PATH))
# 404 NotFound
- handler.path = '/path/to/foo/bar'
- handler.headers = {}
- handler.do_GET()
- self.assertEqual(handler.response.code, 404)
-
- # failure case(connection with Stats is down)
- handler.path = stats_httpd.XML_URL_PATH
- push_answer(self.stats_httpd)
- self.assertFalse(self.stats_httpd.cc_session._socket._closed)
- self.stats_httpd.cc_session._socket._closed = True
- handler.do_GET()
- self.stats_httpd.cc_session._socket._closed = False
- self.assertEqual(handler.response.code, 500)
- self.stats_httpd.cc_session._clear_queues()
-
- # failure case(Stats module returns err)
- handler.path = stats_httpd.XML_URL_PATH
- self.stats_httpd.cc_session.group_sendmsg(
- { 'result': [ 1, "I have an error." ] }, "Stats")
- self.assertFalse(self.stats_httpd.cc_session._socket._closed)
- self.stats_httpd.cc_session._socket._closed = False
- handler.do_GET()
- self.assertEqual(handler.response.code, 500)
- self.stats_httpd.cc_session._clear_queues()
+ self.client._http_vsn_str = 'HTTP/1.0'
+ self.client.putrequest('GET', '/path/to/foo/bar')
+ self.client.endheaders()
+ response = self.client.getresponse()
+ self.assertEqual(response.status, 404)
+
+
+ def test_do_GET_failed1(self):
+ # checks status
+ self.assertEqual(send_command("status", "Stats"),
+ (0, "Stats is up. (PID " + str(os.getpid()) + ")"))
+ # failure case(Stats is down)
+ self.assertTrue(self.stats.running)
+ self.assertEqual(send_shutdown("Stats"), (0, None)) # Stats is down
+ self.assertFalse(self.stats.running)
+ self.stats_httpd.cc_session.set_timeout(milliseconds=100)
+
+ # request XML
+ self.client.putrequest('GET', stats_httpd.XML_URL_PATH)
+ self.client.endheaders()
+ response = self.client.getresponse()
+ self.assertEqual(response.status, 500)
+
+ # request XSD
+ self.client.putrequest('GET', stats_httpd.XSD_URL_PATH)
+ self.client.endheaders()
+ response = self.client.getresponse()
+ self.assertEqual(response.status, 500)
+
+ # request XSL
+ self.client.putrequest('GET', stats_httpd.XSL_URL_PATH)
+ self.client.endheaders()
+ response = self.client.getresponse()
+ self.assertEqual(response.status, 500)
+
+ def test_do_GET_failed2(self):
+ # failure case(Stats replies an error)
+ self.stats.mccs.set_command_handler(
+ lambda cmd, args: \
+ isc.config.ccsession.create_answer(1, "I have an error.")
+ )
+
+ # request XML
+ self.client.putrequest('GET', stats_httpd.XML_URL_PATH)
+ self.client.endheaders()
+ response = self.client.getresponse()
+ self.assertEqual(response.status, 500)
+
+ # request XSD
+ self.client.putrequest('GET', stats_httpd.XSD_URL_PATH)
+ self.client.endheaders()
+ response = self.client.getresponse()
+ self.assertEqual(response.status, 500)
+
+ # request XSL
+ self.client.putrequest('GET', stats_httpd.XSL_URL_PATH)
+ self.client.endheaders()
+ response = self.client.getresponse()
+ self.assertEqual(response.status, 500)
def test_do_HEAD(self):
- for ht in self.httpd:
- self._test_do_HEAD(ht._handler)
+ self.client.putrequest('HEAD', stats_httpd.XML_URL_PATH)
+ self.client.endheaders()
+ response = self.client.getresponse()
+ self.assertEqual(response.status, 200)
- def _test_do_HEAD(self, handler):
- handler.path = '/path/to/foo/bar'
- handler.do_HEAD()
- self.assertEqual(handler.response.code, 404)
+ self.client.putrequest('HEAD', '/path/to/foo/bar')
+ self.client.endheaders()
+ response = self.client.getresponse()
+ self.assertEqual(response.status, 404)
class TestHttpServerError(unittest.TestCase):
"""Tests for HttpServerError exception"""
-
def test_raises(self):
try:
raise stats_httpd.HttpServerError('Nothing')
@@ -162,17 +284,24 @@ class TestHttpServerError(unittest.TestCase):
class TestHttpServer(unittest.TestCase):
"""Tests for HttpServer class"""
+ def setUp(self):
+ # set the signal handler for deadlock
+ self.sig_handler = SignalHandler(self.fail)
+ self.base = BaseModules()
+
+ def tearDown(self):
+ if hasattr(self, "stats_httpd"):
+ self.stats_httpd.stop()
+ self.base.shutdown()
+ # reset the signal handler
+ self.sig_handler.reset()
def test_httpserver(self):
- self.stats_httpd = stats_httpd.StatsHttpd()
- for ht in self.stats_httpd.httpd:
- self.assertTrue(ht.server_address in self.stats_httpd.http_addrs)
- self.assertEqual(ht.xml_handler, self.stats_httpd.xml_handler)
- self.assertEqual(ht.xsd_handler, self.stats_httpd.xsd_handler)
- self.assertEqual(ht.xsl_handler, self.stats_httpd.xsl_handler)
- self.assertEqual(ht.log_writer, self.stats_httpd.write_log)
- self.assertTrue(isinstance(ht._handler, stats_httpd.HttpHandler))
- self.assertTrue(isinstance(ht.socket, fake_socket.socket))
+ self.stats_httpd = MyStatsHttpd(get_availaddr())
+ self.assertEqual(type(self.stats_httpd.httpd), list)
+ self.assertEqual(len(self.stats_httpd.httpd), 1)
+ for httpd in self.stats_httpd.httpd:
+ self.assertTrue(isinstance(httpd, stats_httpd.HttpServer))
class TestStatsHttpdError(unittest.TestCase):
"""Tests for StatsHttpdError exception"""
@@ -187,132 +316,173 @@ class TestStatsHttpd(unittest.TestCase):
"""Tests for StatsHttpd class"""
def setUp(self):
- fake_socket._CLOSED = False
- fake_socket.has_ipv6 = True
- self.stats_httpd = stats_httpd.StatsHttpd()
+ # set the signal handler for deadlock
+ self.sig_handler = SignalHandler(self.fail)
+ self.base = BaseModules()
+ self.stats_server = ThreadingServerManager(MyStats)
+ self.stats_server.run()
+ # checking IPv6 enabled on this platform
+ self.ipv6_enabled = is_ipv6_enabled()
def tearDown(self):
- self.stats_httpd.stop()
+ if hasattr(self, "stats_httpd"):
+ self.stats_httpd.stop()
+ self.stats_server.shutdown()
+ self.base.shutdown()
+ # reset the signal handler
+ self.sig_handler.reset()
def test_init(self):
- self.assertFalse(self.stats_httpd.mccs.get_socket()._closed)
- self.assertEqual(self.stats_httpd.mccs.get_socket().fileno(),
- id(self.stats_httpd.mccs.get_socket()))
- for ht in self.stats_httpd.httpd:
- self.assertFalse(ht.socket._closed)
- self.assertEqual(ht.socket.fileno(), id(ht.socket))
- fake_socket._CLOSED = True
- self.assertRaises(isc.cc.session.SessionError,
- stats_httpd.StatsHttpd)
- fake_socket._CLOSED = False
+ server_address = get_availaddr()
+ self.stats_httpd = MyStatsHttpd(server_address)
+ self.assertEqual(self.stats_httpd.running, False)
+ self.assertEqual(self.stats_httpd.poll_intval, 0.5)
+ self.assertNotEqual(len(self.stats_httpd.httpd), 0)
+ self.assertEqual(type(self.stats_httpd.mccs), isc.config.ModuleCCSession)
+ self.assertEqual(type(self.stats_httpd.cc_session), isc.cc.Session)
+ self.assertEqual(len(self.stats_httpd.config), 2)
+ self.assertTrue('listen_on' in self.stats_httpd.config)
+ self.assertEqual(len(self.stats_httpd.config['listen_on']), 1)
+ self.assertTrue('address' in self.stats_httpd.config['listen_on'][0])
+ self.assertTrue('port' in self.stats_httpd.config['listen_on'][0])
+ self.assertTrue(server_address in set(self.stats_httpd.http_addrs))
+
+ def test_openclose_mccs(self):
+ self.stats_httpd = MyStatsHttpd(get_availaddr())
+ self.stats_httpd.close_mccs()
+ self.assertEqual(self.stats_httpd.mccs, None)
+ self.stats_httpd.open_mccs()
+ self.assertIsNotNone(self.stats_httpd.mccs)
+ self.stats_httpd.mccs = None
+ self.assertEqual(self.stats_httpd.mccs, None)
+ self.assertEqual(self.stats_httpd.close_mccs(), None)
def test_mccs(self):
- self.stats_httpd.open_mccs()
+ self.stats_httpd = MyStatsHttpd(get_availaddr())
+ self.assertIsNotNone(self.stats_httpd.mccs.get_socket())
self.assertTrue(
- isinstance(self.stats_httpd.mccs.get_socket(), fake_socket.socket))
+ isinstance(self.stats_httpd.mccs.get_socket(), socket.socket))
self.assertTrue(
isinstance(self.stats_httpd.cc_session, isc.cc.session.Session))
- self.assertTrue(
- isinstance(self.stats_httpd.stats_module_spec, isc.config.ModuleSpec))
- for cfg in self.stats_httpd.stats_config_spec:
- self.assertTrue('item_name' in cfg)
- self.assertTrue(cfg['item_name'] in DUMMY_DATA)
- self.assertTrue(len(self.stats_httpd.stats_config_spec), len(DUMMY_DATA))
-
- def test_load_config(self):
- self.stats_httpd.load_config()
- self.assertTrue(('127.0.0.1', 8000) in set(self.stats_httpd.http_addrs))
+ statistics_spec = self.stats_httpd.get_stats_spec()
+ for mod in DUMMY_DATA:
+ self.assertTrue(mod in statistics_spec)
+ for cfg in statistics_spec[mod]:
+ self.assertTrue('item_name' in cfg)
+ self.assertTrue(cfg['item_name'] in DUMMY_DATA[mod])
+ self.assertTrue(len(statistics_spec[mod]), len(DUMMY_DATA[mod]))
+ self.stats_httpd.close_mccs()
+ self.assertIsNone(self.stats_httpd.mccs)
def test_httpd(self):
# dual stack (addresses is ipv4 and ipv6)
- fake_socket.has_ipv6 = True
- self.assertTrue(('127.0.0.1', 8000) in set(self.stats_httpd.http_addrs))
- self.stats_httpd.http_addrs = [ ('::1', 8000), ('127.0.0.1', 8000) ]
- self.assertTrue(
- stats_httpd.HttpServer.address_family in set([fake_socket.AF_INET, fake_socket.AF_INET6]))
- self.stats_httpd.open_httpd()
- for ht in self.stats_httpd.httpd:
- self.assertTrue(isinstance(ht.socket, fake_socket.socket))
- self.stats_httpd.close_httpd()
+ if self.ipv6_enabled:
+ server_addresses = (get_availaddr('::1'), get_availaddr())
+ self.stats_httpd = MyStatsHttpd(*server_addresses)
+ for ht in self.stats_httpd.httpd:
+ self.assertTrue(isinstance(ht, stats_httpd.HttpServer))
+ self.assertTrue(ht.address_family in set([socket.AF_INET, socket.AF_INET6]))
+ self.assertTrue(isinstance(ht.socket, socket.socket))
# dual stack (address is ipv6)
- fake_socket.has_ipv6 = True
- self.stats_httpd.http_addrs = [ ('::1', 8000) ]
- self.stats_httpd.open_httpd()
+ if self.ipv6_enabled:
+ server_addresses = get_availaddr('::1')
+ self.stats_httpd = MyStatsHttpd(server_addresses)
+ for ht in self.stats_httpd.httpd:
+ self.assertTrue(isinstance(ht, stats_httpd.HttpServer))
+ self.assertEqual(ht.address_family, socket.AF_INET6)
+ self.assertTrue(isinstance(ht.socket, socket.socket))
+
+ # dual/single stack (address is ipv4)
+ server_addresses = get_availaddr()
+ self.stats_httpd = MyStatsHttpd(server_addresses)
for ht in self.stats_httpd.httpd:
- self.assertTrue(isinstance(ht.socket, fake_socket.socket))
- self.stats_httpd.close_httpd()
+ self.assertTrue(isinstance(ht, stats_httpd.HttpServer))
+ self.assertEqual(ht.address_family, socket.AF_INET)
+ self.assertTrue(isinstance(ht.socket, socket.socket))
- # dual stack (address is ipv4)
- fake_socket.has_ipv6 = True
- self.stats_httpd.http_addrs = [ ('127.0.0.1', 8000) ]
- self.stats_httpd.open_httpd()
+ # any address (IPv4)
+ server_addresses = get_availaddr(address='0.0.0.0')
+ self.stats_httpd = MyStatsHttpd(server_addresses)
for ht in self.stats_httpd.httpd:
- self.assertTrue(isinstance(ht.socket, fake_socket.socket))
- self.stats_httpd.close_httpd()
-
- # only-ipv4 single stack
- fake_socket.has_ipv6 = False
- self.stats_httpd.http_addrs = [ ('127.0.0.1', 8000) ]
- self.stats_httpd.open_httpd()
- for ht in self.stats_httpd.httpd:
- self.assertTrue(isinstance(ht.socket, fake_socket.socket))
- self.stats_httpd.close_httpd()
-
- # only-ipv4 single stack (force set ipv6 )
- fake_socket.has_ipv6 = False
- self.stats_httpd.http_addrs = [ ('::1', 8000) ]
- self.assertRaises(stats_httpd.HttpServerError,
- self.stats_httpd.open_httpd)
-
- # hostname
- self.stats_httpd.http_addrs = [ ('localhost', 8000) ]
- self.stats_httpd.open_httpd()
- for ht in self.stats_httpd.httpd:
- self.assertTrue(isinstance(ht.socket, fake_socket.socket))
- self.stats_httpd.close_httpd()
-
- self.stats_httpd.http_addrs = [ ('my.host.domain', 8000) ]
- self.stats_httpd.open_httpd()
- for ht in self.stats_httpd.httpd:
- self.assertTrue(isinstance(ht.socket, fake_socket.socket))
- self.stats_httpd.close_httpd()
+ self.assertTrue(isinstance(ht, stats_httpd.HttpServer))
+ self.assertEqual(ht.address_family,socket.AF_INET)
+ self.assertTrue(isinstance(ht.socket, socket.socket))
+
+ # any address (IPv6)
+ if self.ipv6_enabled:
+ server_addresses = get_availaddr(address='::')
+ self.stats_httpd = MyStatsHttpd(server_addresses)
+ for ht in self.stats_httpd.httpd:
+ self.assertTrue(isinstance(ht, stats_httpd.HttpServer))
+ self.assertEqual(ht.address_family,socket.AF_INET6)
+ self.assertTrue(isinstance(ht.socket, socket.socket))
+
+ # existent hostname
+ self.assertRaises(stats_httpd.HttpServerError, MyStatsHttpd,
+ get_availaddr(address='localhost'))
+
+ # nonexistent hostname
+ self.assertRaises(stats_httpd.HttpServerError, MyStatsHttpd, ('my.host.domain', 8000))
# over flow of port number
- self.stats_httpd.http_addrs = [ ('', 80000) ]
- self.assertRaises(stats_httpd.HttpServerError, self.stats_httpd.open_httpd)
+ self.assertRaises(stats_httpd.HttpServerError, MyStatsHttpd, ('127.0.0.1', 80000))
+
# negative
- self.stats_httpd.http_addrs = [ ('', -8000) ]
- self.assertRaises(stats_httpd.HttpServerError, self.stats_httpd.open_httpd)
- # alphabet
- self.stats_httpd.http_addrs = [ ('', 'ABCDE') ]
- self.assertRaises(stats_httpd.HttpServerError, self.stats_httpd.open_httpd)
-
- def test_start(self):
- self.stats_httpd.cc_session.group_sendmsg(
- { 'command': [ "shutdown" ] }, "StatsHttpd")
- self.stats_httpd.start()
- self.stats_httpd = stats_httpd.StatsHttpd()
- self.assertRaises(
- fake_select.error, self.stats_httpd.start)
+ self.assertRaises(stats_httpd.HttpServerError, MyStatsHttpd, ('127.0.0.1', -8000))
- def test_stop(self):
- # success case
- fake_socket._CLOSED = False
- self.stats_httpd.stop()
+ # alphabet
+ self.assertRaises(stats_httpd.HttpServerError, MyStatsHttpd, ('127.0.0.1', 'ABCDE'))
+
+ # Address already in use
+ server_addresses = get_availaddr()
+ self.stats_httpd_server = ThreadingServerManager(MyStatsHttpd, server_addresses)
+ self.stats_httpd_server.run()
+ self.assertRaises(stats_httpd.HttpServerError, MyStatsHttpd, server_addresses)
+ send_shutdown("StatsHttpd")
+
+ def test_running(self):
+ self.stats_httpd_server = ThreadingServerManager(MyStatsHttpd, get_availaddr())
+ self.stats_httpd = self.stats_httpd_server.server
self.assertFalse(self.stats_httpd.running)
- self.assertIsNone(self.stats_httpd.mccs)
- for ht in self.stats_httpd.httpd:
- self.assertTrue(ht.socket._closed)
- self.assertTrue(self.stats_httpd.cc_session._socket._closed)
+ self.stats_httpd_server.run()
+ self.assertEqual(send_command("status", "StatsHttpd"),
+ (0, "Stats Httpd is up. (PID " + str(os.getpid()) + ")"))
+ self.assertTrue(self.stats_httpd.running)
+ self.assertEqual(send_shutdown("StatsHttpd"), (0, None))
+ self.assertFalse(self.stats_httpd.running)
+ self.stats_httpd_server.shutdown()
+
# failure case
- self.stats_httpd.cc_session._socket._closed = False
- self.stats_httpd.open_mccs()
- self.stats_httpd.cc_session._socket._closed = True
- self.stats_httpd.stop() # No excetion raises
- self.stats_httpd.cc_session._socket._closed = False
+ self.stats_httpd = MyStatsHttpd(get_availaddr())
+ self.stats_httpd.cc_session.close()
+ self.assertRaises(ValueError, self.stats_httpd.start)
+
+ def test_failure_with_a_select_error (self):
+ """checks select.error is raised if the exception except
+ errno.EINTR is raised while it's selecting"""
+ def raise_select_except(*args):
+ raise select.error('dummy error')
+ orig_select = stats_httpd.select.select
+ stats_httpd.select.select = raise_select_except
+ self.stats_httpd = MyStatsHttpd(get_availaddr())
+ self.assertRaises(select.error, self.stats_httpd.start)
+ stats_httpd.select.select = orig_select
+
+ def test_nofailure_with_errno_EINTR(self):
+ """checks no exception is raised if errno.EINTR is raised
+ while it's selecting"""
+ def raise_select_except(*args):
+ raise select.error(errno.EINTR)
+ orig_select = stats_httpd.select.select
+ stats_httpd.select.select = raise_select_except
+ self.stats_httpd_server = ThreadingServerManager(MyStatsHttpd, get_availaddr())
+ self.stats_httpd_server.run()
+ self.stats_httpd_server.shutdown()
+ stats_httpd.select.select = orig_select
def test_open_template(self):
+ self.stats_httpd = MyStatsHttpd(get_availaddr())
# successful conditions
tmpl = self.stats_httpd.open_template(stats_httpd.XML_TEMPLATE_LOCATION)
self.assertTrue(isinstance(tmpl, string.Template))
@@ -346,13 +516,13 @@ class TestStatsHttpd(unittest.TestCase):
self.stats_httpd.open_template, '/path/to/foo/bar')
def test_commands(self):
+ self.stats_httpd = MyStatsHttpd(get_availaddr())
self.assertEqual(self.stats_httpd.command_handler("status", None),
isc.config.ccsession.create_answer(
0, "Stats Httpd is up. (PID " + str(os.getpid()) + ")"))
self.stats_httpd.running = True
self.assertEqual(self.stats_httpd.command_handler("shutdown", None),
- isc.config.ccsession.create_answer(
- 0, "Stats Httpd is shutting down."))
+ isc.config.ccsession.create_answer(0))
self.assertFalse(self.stats_httpd.running)
self.assertEqual(
self.stats_httpd.command_handler("__UNKNOWN_COMMAND__", None),
@@ -360,42 +530,48 @@ class TestStatsHttpd(unittest.TestCase):
1, "Unknown command: __UNKNOWN_COMMAND__"))
def test_config(self):
+ self.stats_httpd = MyStatsHttpd(get_availaddr())
self.assertEqual(
self.stats_httpd.config_handler(dict(_UNKNOWN_KEY_=None)),
isc.config.ccsession.create_answer(
- 1, "Unknown known config: _UNKNOWN_KEY_"))
- self.assertEqual(
- self.stats_httpd.config_handler(
- dict(listen_on=[dict(address="::2",port=8000)])),
- isc.config.ccsession.create_answer(0))
- self.assertTrue("listen_on" in self.stats_httpd.config)
- for addr in self.stats_httpd.config["listen_on"]:
- self.assertTrue("address" in addr)
- self.assertTrue("port" in addr)
- self.assertTrue(addr["address"] == "::2")
- self.assertTrue(addr["port"] == 8000)
+ 1, "unknown item _UNKNOWN_KEY_"))
+ addresses = get_availaddr()
self.assertEqual(
self.stats_httpd.config_handler(
- dict(listen_on=[dict(address="::1",port=80)])),
+ dict(listen_on=[dict(address=addresses[0],port=addresses[1])])),
isc.config.ccsession.create_answer(0))
self.assertTrue("listen_on" in self.stats_httpd.config)
for addr in self.stats_httpd.config["listen_on"]:
self.assertTrue("address" in addr)
self.assertTrue("port" in addr)
- self.assertTrue(addr["address"] == "::1")
- self.assertTrue(addr["port"] == 80)
-
+ self.assertTrue(addr["address"] == addresses[0])
+ self.assertTrue(addr["port"] == addresses[1])
+
+ if self.ipv6_enabled:
+ addresses = get_availaddr("::1")
+ self.assertEqual(
+ self.stats_httpd.config_handler(
+ dict(listen_on=[dict(address=addresses[0],port=addresses[1])])),
+ isc.config.ccsession.create_answer(0))
+ self.assertTrue("listen_on" in self.stats_httpd.config)
+ for addr in self.stats_httpd.config["listen_on"]:
+ self.assertTrue("address" in addr)
+ self.assertTrue("port" in addr)
+ self.assertTrue(addr["address"] == addresses[0])
+ self.assertTrue(addr["port"] == addresses[1])
+
+ addresses = get_availaddr()
self.assertEqual(
self.stats_httpd.config_handler(
- dict(listen_on=[dict(address="1.2.3.4",port=54321)])),
+ dict(listen_on=[dict(address=addresses[0],port=addresses[1])])),
isc.config.ccsession.create_answer(0))
self.assertTrue("listen_on" in self.stats_httpd.config)
for addr in self.stats_httpd.config["listen_on"]:
self.assertTrue("address" in addr)
self.assertTrue("port" in addr)
- self.assertTrue(addr["address"] == "1.2.3.4")
- self.assertTrue(addr["port"] == 54321)
+ self.assertTrue(addr["address"] == addresses[0])
+ self.assertTrue(addr["port"] == addresses[1])
(ret, arg) = isc.config.ccsession.parse_answer(
self.stats_httpd.config_handler(
dict(listen_on=[dict(address="1.2.3.4",port=543210)]))
@@ -403,93 +579,103 @@ class TestStatsHttpd(unittest.TestCase):
self.assertEqual(ret, 1)
def test_xml_handler(self):
- orig_get_stats_data = stats_httpd.StatsHttpd.get_stats_data
- stats_httpd.StatsHttpd.get_stats_data = lambda x: {'foo':'bar'}
- xml_body1 = stats_httpd.StatsHttpd().open_template(
+ self.stats_httpd = MyStatsHttpd(get_availaddr())
+ self.stats_httpd.get_stats_data = lambda: \
+ { 'Dummy' : { 'foo':'bar' } }
+ xml_body1 = self.stats_httpd.open_template(
stats_httpd.XML_TEMPLATE_LOCATION).substitute(
- xml_string='<foo>bar</foo>',
+ xml_string='<Dummy><foo>bar</foo></Dummy>',
xsd_namespace=stats_httpd.XSD_NAMESPACE,
xsd_url_path=stats_httpd.XSD_URL_PATH,
xsl_url_path=stats_httpd.XSL_URL_PATH)
- xml_body2 = stats_httpd.StatsHttpd().xml_handler()
+ xml_body2 = self.stats_httpd.xml_handler()
self.assertEqual(type(xml_body1), str)
self.assertEqual(type(xml_body2), str)
self.assertEqual(xml_body1, xml_body2)
- stats_httpd.StatsHttpd.get_stats_data = lambda x: {'bar':'foo'}
- xml_body2 = stats_httpd.StatsHttpd().xml_handler()
+ self.stats_httpd.get_stats_data = lambda: \
+ { 'Dummy' : {'bar':'foo'} }
+ xml_body2 = self.stats_httpd.xml_handler()
self.assertNotEqual(xml_body1, xml_body2)
- stats_httpd.StatsHttpd.get_stats_data = orig_get_stats_data
def test_xsd_handler(self):
- orig_get_stats_spec = stats_httpd.StatsHttpd.get_stats_spec
- stats_httpd.StatsHttpd.get_stats_spec = lambda x: \
- [{
- "item_name": "foo",
- "item_type": "string",
- "item_optional": False,
- "item_default": "bar",
- "item_description": "foo is bar",
- "item_title": "Foo"
- }]
- xsd_body1 = stats_httpd.StatsHttpd().open_template(
+ self.stats_httpd = MyStatsHttpd(get_availaddr())
+ self.stats_httpd.get_stats_spec = lambda: \
+ { "Dummy" :
+ [{
+ "item_name": "foo",
+ "item_type": "string",
+ "item_optional": False,
+ "item_default": "bar",
+ "item_description": "foo is bar",
+ "item_title": "Foo"
+ }]
+ }
+ xsd_body1 = self.stats_httpd.open_template(
stats_httpd.XSD_TEMPLATE_LOCATION).substitute(
- xsd_string='<all>' \
+ xsd_string=\
+ '<all><element name="Dummy"><complexType><all>' \
+ '<element maxOccurs="1" minOccurs="1" name="foo" type="string">' \
+ '<annotation><appinfo>Foo</appinfo>' \
+ '<documentation>foo is bar</documentation>' \
- + '</annotation></element></all>',
+ + '</annotation></element></all>' \
+ + '</complexType></element></all>',
xsd_namespace=stats_httpd.XSD_NAMESPACE)
- xsd_body2 = stats_httpd.StatsHttpd().xsd_handler()
+ xsd_body2 = self.stats_httpd.xsd_handler()
self.assertEqual(type(xsd_body1), str)
self.assertEqual(type(xsd_body2), str)
self.assertEqual(xsd_body1, xsd_body2)
- stats_httpd.StatsHttpd.get_stats_spec = lambda x: \
- [{
- "item_name": "bar",
- "item_type": "string",
- "item_optional": False,
- "item_default": "foo",
- "item_description": "bar is foo",
- "item_title": "bar"
- }]
- xsd_body2 = stats_httpd.StatsHttpd().xsd_handler()
+ self.stats_httpd.get_stats_spec = lambda: \
+ { "Dummy" :
+ [{
+ "item_name": "bar",
+ "item_type": "string",
+ "item_optional": False,
+ "item_default": "foo",
+ "item_description": "bar is foo",
+ "item_title": "bar"
+ }]
+ }
+ xsd_body2 = self.stats_httpd.xsd_handler()
self.assertNotEqual(xsd_body1, xsd_body2)
- stats_httpd.StatsHttpd.get_stats_spec = orig_get_stats_spec
def test_xsl_handler(self):
- orig_get_stats_spec = stats_httpd.StatsHttpd.get_stats_spec
- stats_httpd.StatsHttpd.get_stats_spec = lambda x: \
- [{
- "item_name": "foo",
- "item_type": "string",
- "item_optional": False,
- "item_default": "bar",
- "item_description": "foo is bar",
- "item_title": "Foo"
- }]
- xsl_body1 = stats_httpd.StatsHttpd().open_template(
+ self.stats_httpd = MyStatsHttpd(get_availaddr())
+ self.stats_httpd.get_stats_spec = lambda: \
+ { "Dummy" :
+ [{
+ "item_name": "foo",
+ "item_type": "string",
+ "item_optional": False,
+ "item_default": "bar",
+ "item_description": "foo is bar",
+ "item_title": "Foo"
+ }]
+ }
+ xsl_body1 = self.stats_httpd.open_template(
stats_httpd.XSL_TEMPLATE_LOCATION).substitute(
xsl_string='<xsl:template match="*"><tr>' \
+ + '<td>Dummy</td>' \
+ '<td class="title" title="foo is bar">Foo</td>' \
- + '<td><xsl:value-of select="foo" /></td>' \
+ + '<td><xsl:value-of select="Dummy/foo" /></td>' \
+ '</tr></xsl:template>',
xsd_namespace=stats_httpd.XSD_NAMESPACE)
- xsl_body2 = stats_httpd.StatsHttpd().xsl_handler()
+ xsl_body2 = self.stats_httpd.xsl_handler()
self.assertEqual(type(xsl_body1), str)
self.assertEqual(type(xsl_body2), str)
self.assertEqual(xsl_body1, xsl_body2)
- stats_httpd.StatsHttpd.get_stats_spec = lambda x: \
- [{
- "item_name": "bar",
- "item_type": "string",
- "item_optional": False,
- "item_default": "foo",
- "item_description": "bar is foo",
- "item_title": "bar"
- }]
- xsl_body2 = stats_httpd.StatsHttpd().xsl_handler()
+ self.stats_httpd.get_stats_spec = lambda: \
+ { "Dummy" :
+ [{
+ "item_name": "bar",
+ "item_type": "string",
+ "item_optional": False,
+ "item_default": "foo",
+ "item_description": "bar is foo",
+ "item_title": "bar"
+ }]
+ }
+ xsl_body2 = self.stats_httpd.xsl_handler()
self.assertNotEqual(xsl_body1, xsl_body2)
- stats_httpd.StatsHttpd.get_stats_spec = orig_get_stats_spec
def test_for_without_B10_FROM_SOURCE(self):
# just lets it go through the code without B10_FROM_SOURCE env
@@ -500,8 +686,6 @@ class TestStatsHttpd(unittest.TestCase):
imp.reload(stats_httpd)
os.environ["B10_FROM_SOURCE"] = tmppath
imp.reload(stats_httpd)
- stats_httpd.socket = fake_socket
- stats_httpd.select = fake_select
if __name__ == "__main__":
unittest.main()
diff --git a/src/bin/stats/tests/b10-stats_test.py b/src/bin/stats/tests/b10-stats_test.py
index a42c81d..3813c7e 100644
--- a/src/bin/stats/tests/b10-stats_test.py
+++ b/src/bin/stats/tests/b10-stats_test.py
@@ -13,649 +13,593 @@
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-#
-# Tests for the stats module
-#
+"""
+In each of these tests we start several virtual components. They are
+not the real components, no external processes are started. They are
+just simple mock objects running each in its own thread and pretending
+to be bind10 modules. This helps testing the stats module in a close
+to real environment.
+"""
+
+import unittest
import os
-import sys
+import threading
+import io
import time
-import unittest
import imp
-from isc.cc.session import Session, SessionError
-from isc.config.ccsession import ModuleCCSession, ModuleCCSessionError
-from fake_time import time, strftime, gmtime
-import stats
-stats.time = time
-stats.strftime = strftime
-stats.gmtime = gmtime
-from stats import SessionSubject, CCSessionListener, get_timestamp, get_datetime
-from fake_time import _TEST_TIME_SECS, _TEST_TIME_STRF
-
-if "B10_FROM_SOURCE" in os.environ:
- TEST_SPECFILE_LOCATION = os.environ["B10_FROM_SOURCE"] +\
- "/src/bin/stats/tests/testdata/stats_test.spec"
-else:
- TEST_SPECFILE_LOCATION = "./testdata/stats_test.spec"
-class TestStats(unittest.TestCase):
+import stats
+import isc.cc.session
+from test_utils import BaseModules, ThreadingServerManager, MyStats, SignalHandler, send_command, send_shutdown
+
+class TestUtilties(unittest.TestCase):
+ items = [
+ { 'item_name': 'test_int1', 'item_type': 'integer', 'item_default': 12345 },
+ { 'item_name': 'test_real1', 'item_type': 'real', 'item_default': 12345.6789 },
+ { 'item_name': 'test_bool1', 'item_type': 'boolean', 'item_default': True },
+ { 'item_name': 'test_str1', 'item_type': 'string', 'item_default': 'ABCD' },
+ { 'item_name': 'test_list1', 'item_type': 'list', 'item_default': [1,2,3],
+ 'list_item_spec' : { 'item_name': 'number', 'item_type': 'integer' } },
+ { 'item_name': 'test_map1', 'item_type': 'map', 'item_default': {'a':1,'b':2,'c':3},
+ 'map_item_spec' : [ { 'item_name': 'a', 'item_type': 'integer'},
+ { 'item_name': 'b', 'item_type': 'integer'},
+ { 'item_name': 'c', 'item_type': 'integer'} ] },
+ { 'item_name': 'test_int2', 'item_type': 'integer' },
+ { 'item_name': 'test_real2', 'item_type': 'real' },
+ { 'item_name': 'test_bool2', 'item_type': 'boolean' },
+ { 'item_name': 'test_str2', 'item_type': 'string' },
+ { 'item_name': 'test_list2', 'item_type': 'list',
+ 'list_item_spec' : { 'item_name': 'number', 'item_type': 'integer' } },
+ { 'item_name': 'test_map2', 'item_type': 'map',
+ 'map_item_spec' : [ { 'item_name': 'A', 'item_type': 'integer'},
+ { 'item_name': 'B', 'item_type': 'integer'},
+ { 'item_name': 'C', 'item_type': 'integer'} ] },
+ { 'item_name': 'test_none', 'item_type': 'none' },
+ { 'item_name': 'test_list3', 'item_type': 'list', 'item_default': ["one","two","three"],
+ 'list_item_spec' : { 'item_name': 'number', 'item_type': 'string' } },
+ { 'item_name': 'test_map3', 'item_type': 'map', 'item_default': {'a':'one','b':'two','c':'three'},
+ 'map_item_spec' : [ { 'item_name': 'a', 'item_type': 'string'},
+ { 'item_name': 'b', 'item_type': 'string'},
+ { 'item_name': 'c', 'item_type': 'string'} ] }
+ ]
def setUp(self):
- self.session = Session()
- self.subject = SessionSubject(session=self.session)
- self.listener = CCSessionListener(self.subject)
- self.stats_spec = self.listener.cc_session.get_module_spec().get_config_spec()
- self.module_name = self.listener.cc_session.get_module_spec().get_module_name()
- self.stats_data = {
- 'report_time' : get_datetime(),
- 'bind10.boot_time' : "1970-01-01T00:00:00Z",
- 'stats.timestamp' : get_timestamp(),
- 'stats.lname' : self.session.lname,
- 'auth.queries.tcp': 0,
- 'auth.queries.udp': 0,
- "stats.boot_time": get_datetime(),
- "stats.start_time": get_datetime(),
- "stats.last_update_time": get_datetime()
- }
- # check starting
- self.assertFalse(self.subject.running)
- self.subject.start()
- self.assertTrue(self.subject.running)
- self.assertEqual(len(self.session.message_queue), 0)
- self.assertEqual(self.module_name, 'Stats')
-
- def tearDown(self):
- # check closing
- self.subject.stop()
- self.assertFalse(self.subject.running)
- self.subject.detach(self.listener)
- self.listener.stop()
- self.session.close()
-
- def test_local_func(self):
- """
- Test for local function
-
- """
- # test for result_ok
- self.assertEqual(type(result_ok()), dict)
- self.assertEqual(result_ok(), {'result': [0]})
- self.assertEqual(result_ok(1), {'result': [1]})
- self.assertEqual(result_ok(0,'OK'), {'result': [0, 'OK']})
- self.assertEqual(result_ok(1,'Not good'), {'result': [1, 'Not good']})
- self.assertEqual(result_ok(None,"It's None"), {'result': [None, "It's None"]})
- self.assertNotEqual(result_ok(), {'RESULT': [0]})
-
- # test for get_timestamp
- self.assertEqual(get_timestamp(), _TEST_TIME_SECS)
-
- # test for get_datetime
- self.assertEqual(get_datetime(), _TEST_TIME_STRF)
-
- def test_show_command(self):
- """
- Test for show command
-
- """
- # test show command without arg
- self.session.group_sendmsg({"command": [ "show", None ]}, "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- result_data = self.session.get_message("Stats", None)
- # ignore under 0.9 seconds
- self.assertEqual(result_ok(0, self.stats_data), result_data)
- self.assertEqual(len(self.session.message_queue), 0)
-
- # test show command with arg
- self.session.group_sendmsg({"command": [ "show", {"stats_item_name": "stats.lname"}]}, "Stats")
- self.assertEqual(len(self.subject.session.message_queue), 1)
- self.subject.check()
- result_data = self.subject.session.get_message("Stats", None)
- self.assertEqual(result_ok(0, {'stats.lname': self.stats_data['stats.lname']}),
- result_data)
- self.assertEqual(len(self.subject.session.message_queue), 0)
-
- # test show command with arg which has wrong name
- self.session.group_sendmsg({"command": [ "show", {"stats_item_name": "stats.dummy"}]}, "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- result_data = self.session.get_message("Stats", None)
- # ignore under 0.9 seconds
- self.assertEqual(result_ok(0, self.stats_data), result_data)
- self.assertEqual(len(self.session.message_queue), 0)
-
- def test_set_command(self):
- """
- Test for set command
-
- """
- # test set command
- self.stats_data['auth.queries.udp'] = 54321
- self.assertEqual(self.stats_data['auth.queries.udp'], 54321)
- self.assertEqual(self.stats_data['auth.queries.tcp'], 0)
- self.session.group_sendmsg({ "command": [
- "set", {
- 'stats_data': {'auth.queries.udp': 54321 }
- } ] },
- "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- self.assertEqual(result_ok(),
- self.session.get_message("Stats", None))
- self.assertEqual(len(self.session.message_queue), 0)
-
- # test show command
- self.session.group_sendmsg({"command": [ "show", None ]}, "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- result_data = self.session.get_message("Stats", None)
- self.assertEqual(result_ok(0, self.stats_data), result_data)
- self.assertEqual(len(self.session.message_queue), 0)
-
- # test set command 2
- self.stats_data['auth.queries.udp'] = 0
- self.assertEqual(self.stats_data['auth.queries.udp'], 0)
- self.assertEqual(self.stats_data['auth.queries.tcp'], 0)
- self.session.group_sendmsg({ "command": [ "set", {'stats_data': {'auth.queries.udp': 0}} ]},
- "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- self.assertEqual(result_ok(),
- self.session.get_message("Stats", None))
- self.assertEqual(len(self.session.message_queue), 0)
-
- # test show command 2
- self.session.group_sendmsg({"command": [ "show", None ]}, "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- result_data = self.session.get_message("Stats", None)
- self.assertEqual(result_ok(0, self.stats_data), result_data)
- self.assertEqual(len(self.session.message_queue), 0)
-
- # test set command 3
- self.stats_data['auth.queries.tcp'] = 54322
- self.assertEqual(self.stats_data['auth.queries.udp'], 0)
- self.assertEqual(self.stats_data['auth.queries.tcp'], 54322)
- self.session.group_sendmsg({ "command": [
- "set", {
- 'stats_data': {'auth.queries.tcp': 54322 }
- } ] },
- "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- self.assertEqual(result_ok(),
- self.session.get_message("Stats", None))
- self.assertEqual(len(self.session.message_queue), 0)
-
- # test show command 3
- self.session.group_sendmsg({"command": [ "show", None ]}, "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- result_data = self.session.get_message("Stats", None)
- self.assertEqual(result_ok(0, self.stats_data), result_data)
- self.assertEqual(len(self.session.message_queue), 0)
-
- def test_remove_command(self):
- """
- Test for remove command
-
- """
- self.session.group_sendmsg({"command":
- [ "remove", {"stats_item_name": 'bind10.boot_time' }]},
- "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- self.assertEqual(result_ok(),
- self.session.get_message("Stats", None))
- self.assertEqual(len(self.session.message_queue), 0)
- self.assertEqual(self.stats_data.pop('bind10.boot_time'), "1970-01-01T00:00:00Z")
- self.assertFalse('bind10.boot_time' in self.stats_data)
-
- # test show command with arg
- self.session.group_sendmsg({"command":
- [ "show", {"stats_item_name": 'bind10.boot_time'}]},
- "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- result_data = self.session.get_message("Stats", None)
- self.assertFalse('bind10.boot_time' in result_data['result'][1])
- self.assertEqual(result_ok(0, self.stats_data), result_data)
- self.assertEqual(len(self.session.message_queue), 0)
-
- def test_reset_command(self):
- """
- Test for reset command
-
- """
- self.session.group_sendmsg({"command": [ "reset" ] }, "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- self.assertEqual(result_ok(),
- self.session.get_message("Stats", None))
- self.assertEqual(len(self.session.message_queue), 0)
-
- # test show command
- self.session.group_sendmsg({"command": [ "show" ]}, "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- result_data = self.session.get_message("Stats", None)
- self.assertEqual(result_ok(0, self.stats_data), result_data)
- self.assertEqual(len(self.session.message_queue), 0)
-
- def test_status_command(self):
- """
- Test for status command
-
- """
- self.session.group_sendmsg({"command": [ "status" ] }, "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- self.assertEqual(result_ok(0, "I'm alive."),
- self.session.get_message("Stats", None))
- self.assertEqual(len(self.session.message_queue), 0)
-
- def test_unknown_command(self):
- """
- Test for unknown command
-
- """
- self.session.group_sendmsg({"command": [ "hoge", None ]}, "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- self.assertEqual(result_ok(1, "Unknown command: 'hoge'"),
- self.session.get_message("Stats", None))
- self.assertEqual(len(self.session.message_queue), 0)
-
- def test_shutdown_command(self):
- """
- Test for shutdown command
-
- """
- self.session.group_sendmsg({"command": [ "shutdown", None ]}, "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.assertTrue(self.subject.running)
- self.subject.check()
- self.assertFalse(self.subject.running)
- self.assertEqual(result_ok(),
- self.session.get_message("Stats", None))
- self.assertEqual(len(self.session.message_queue), 0)
-
+ self.const_timestamp = 1308730448.965706
+ self.const_timetuple = (2011, 6, 22, 8, 14, 8, 2, 173, 0)
+ self.const_datetime = '2011-06-22T08:14:08Z'
+ stats.time = lambda : self.const_timestamp
+ stats.gmtime = lambda : self.const_timetuple
- def test_some_commands(self):
- """
- Test for some commands in a row
-
- """
- # test set command
- self.stats_data['bind10.boot_time'] = '2010-08-02T14:47:56Z'
- self.assertEqual(self.stats_data['bind10.boot_time'], '2010-08-02T14:47:56Z')
- self.session.group_sendmsg({ "command": [
- "set", {
- 'stats_data': {'bind10.boot_time': '2010-08-02T14:47:56Z' }
- }]},
- "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- self.assertEqual(result_ok(),
- self.session.get_message("Stats", None))
- self.assertEqual(len(self.session.message_queue), 0)
-
- # check its value
- self.session.group_sendmsg({ "command": [
- "show", { 'stats_item_name': 'bind10.boot_time' }
- ] }, "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- result_data = self.session.get_message("Stats", None)
- self.assertEqual(result_ok(0, {'bind10.boot_time': '2010-08-02T14:47:56Z'}),
- result_data)
- self.assertEqual(result_ok(0, {'bind10.boot_time': self.stats_data['bind10.boot_time']}),
- result_data)
- self.assertEqual(len(self.session.message_queue), 0)
-
- # test set command 2nd
- self.stats_data['auth.queries.udp'] = 98765
- self.assertEqual(self.stats_data['auth.queries.udp'], 98765)
- self.session.group_sendmsg({ "command": [
- "set", { 'stats_data': {
- 'auth.queries.udp':
- self.stats_data['auth.queries.udp']
- } }
- ] }, "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- self.assertEqual(result_ok(),
- self.session.get_message("Stats", None))
- self.assertEqual(len(self.session.message_queue), 0)
-
- # check its value
- self.session.group_sendmsg({"command": [
- "show", {'stats_item_name': 'auth.queries.udp'}
- ] }, "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- result_data = self.session.get_message("Stats", None)
- self.assertEqual(result_ok(0, {'auth.queries.udp': 98765}),
- result_data)
- self.assertEqual(result_ok(0, {'auth.queries.udp': self.stats_data['auth.queries.udp']}),
- result_data)
- self.assertEqual(len(self.session.message_queue), 0)
-
- # test set command 3
- self.stats_data['auth.queries.tcp'] = 4321
- self.session.group_sendmsg({"command": [
- "set",
- {'stats_data': {'auth.queries.tcp': 4321 }} ]},
- "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- self.assertEqual(result_ok(),
- self.session.get_message("Stats", None))
- self.assertEqual(len(self.session.message_queue), 0)
-
- # check value
- self.session.group_sendmsg({"command": [ "show", {'stats_item_name': 'auth.queries.tcp'} ]},
- "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- result_data = self.session.get_message("Stats", None)
- self.assertEqual(result_ok(0, {'auth.queries.tcp': 4321}),
- result_data)
- self.assertEqual(result_ok(0, {'auth.queries.tcp': self.stats_data['auth.queries.tcp']}),
- result_data)
- self.assertEqual(len(self.session.message_queue), 0)
-
- self.session.group_sendmsg({"command": [ "show", {'stats_item_name': 'auth.queries.udp'} ]},
- "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- result_data = self.session.get_message("Stats", None)
- self.assertEqual(result_ok(0, {'auth.queries.udp': 98765}),
- result_data)
- self.assertEqual(result_ok(0, {'auth.queries.udp': self.stats_data['auth.queries.udp']}),
- result_data)
- self.assertEqual(len(self.session.message_queue), 0)
-
- # test set command 4
- self.stats_data['auth.queries.tcp'] = 67890
- self.session.group_sendmsg({"command": [
- "set", {'stats_data': {'auth.queries.tcp': 67890 }} ]},
- "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- self.assertEqual(result_ok(),
- self.session.get_message("Stats", None))
- self.assertEqual(len(self.session.message_queue), 0)
-
- # test show command for all values
- self.session.group_sendmsg({"command": [ "show", None ]}, "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- result_data = self.session.get_message("Stats", None)
- self.assertEqual(result_ok(0, self.stats_data), result_data)
- self.assertEqual(len(self.session.message_queue), 0)
-
- def test_some_commands2(self):
- """
- Test for some commands in a row using list-type value
-
- """
- self.stats_data['listtype'] = [1, 2, 3]
- self.assertEqual(self.stats_data['listtype'], [1, 2, 3])
- self.session.group_sendmsg({ "command": [
- "set", {'stats_data': {'listtype': [1, 2, 3] }}
- ]}, "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- self.assertEqual(result_ok(),
- self.session.get_message("Stats", None))
- self.assertEqual(len(self.session.message_queue), 0)
-
- # check its value
- self.session.group_sendmsg({ "command": [
- "show", { 'stats_item_name': 'listtype'}
- ]}, "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- result_data = self.session.get_message("Stats", None)
- self.assertEqual(result_ok(0, {'listtype': [1, 2, 3]}),
- result_data)
- self.assertEqual(result_ok(0, {'listtype': self.stats_data['listtype']}),
- result_data)
- self.assertEqual(len(self.session.message_queue), 0)
-
- # test set list-type value
- self.assertEqual(self.stats_data['listtype'], [1, 2, 3])
- self.session.group_sendmsg({"command": [
- "set", {'stats_data': {'listtype': [3, 2, 1, 0] }}
- ]}, "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- self.assertEqual(result_ok(),
- self.session.get_message("Stats", None))
- self.assertEqual(len(self.session.message_queue), 0)
-
- # check its value
- self.session.group_sendmsg({ "command": [
- "show", { 'stats_item_name': 'listtype' }
- ] }, "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- result_data = self.session.get_message("Stats", None)
- self.assertEqual(result_ok(0, {'listtype': [3, 2, 1, 0]}),
- result_data)
- self.assertEqual(len(self.session.message_queue), 0)
-
- def test_some_commands3(self):
- """
- Test for some commands in a row using dictionary-type value
-
- """
- self.stats_data['dicttype'] = {"a": 1, "b": 2, "c": 3}
- self.assertEqual(self.stats_data['dicttype'], {"a": 1, "b": 2, "c": 3})
- self.session.group_sendmsg({ "command": [
- "set", {
- 'stats_data': {'dicttype': {"a": 1, "b": 2, "c": 3} }
- }]},
- "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- self.assertEqual(result_ok(),
- self.session.get_message("Stats", None))
- self.assertEqual(len(self.session.message_queue), 0)
-
- # check its value
- self.session.group_sendmsg({ "command": [ "show", { 'stats_item_name': 'dicttype' } ]}, "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- result_data = self.session.get_message("Stats", None)
- self.assertEqual(result_ok(0, {'dicttype': {"a": 1, "b": 2, "c": 3}}),
- result_data)
- self.assertEqual(result_ok(0, {'dicttype': self.stats_data['dicttype']}),
- result_data)
- self.assertEqual(len(self.session.message_queue), 0)
-
- # test set list-type value
- self.assertEqual(self.stats_data['dicttype'], {"a": 1, "b": 2, "c": 3})
- self.session.group_sendmsg({"command": [
- "set", {'stats_data': {'dicttype': {"a": 3, "b": 2, "c": 1, "d": 0} }} ]},
- "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- self.assertEqual(result_ok(),
- self.session.get_message("Stats", None))
- self.assertEqual(len(self.session.message_queue), 0)
-
- # check its value
- self.session.group_sendmsg({ "command": [ "show", { 'stats_item_name': 'dicttype' }]}, "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- result_data = self.session.get_message("Stats", None)
- self.assertEqual(result_ok(0, {'dicttype': {"a": 3, "b": 2, "c": 1, "d": 0} }),
- result_data)
- self.assertEqual(len(self.session.message_queue), 0)
-
- def test_config_update(self):
- """
- Test for config update
-
- """
- # test show command without arg
- self.session.group_sendmsg({"command": [ "config_update", {"x-version":999} ]}, "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- self.assertEqual(result_ok(),
- self.session.get_message("Stats", None))
-
- def test_for_boss(self):
- last_queue = self.session.old_message_queue.pop()
- self.assertEqual(
- last_queue.msg, {'command': ['sendstats']})
+ def test_get_spec_defaults(self):
self.assertEqual(
- last_queue.env['group'], 'Boss')
-
-class TestStats2(unittest.TestCase):
+ stats.get_spec_defaults(self.items), {
+ 'test_int1' : 12345 ,
+ 'test_real1' : 12345.6789 ,
+ 'test_bool1' : True ,
+ 'test_str1' : 'ABCD' ,
+ 'test_list1' : [1,2,3] ,
+ 'test_map1' : {'a':1,'b':2,'c':3},
+ 'test_int2' : 0 ,
+ 'test_real2' : 0.0,
+ 'test_bool2' : False,
+ 'test_str2' : "",
+ 'test_list2' : [0],
+ 'test_map2' : { 'A' : 0, 'B' : 0, 'C' : 0 },
+ 'test_none' : None,
+ 'test_list3' : [ "one", "two", "three" ],
+ 'test_map3' : { 'a' : 'one', 'b' : 'two', 'c' : 'three' } })
+ self.assertEqual(stats.get_spec_defaults(None), {})
+ self.assertRaises(KeyError, stats.get_spec_defaults, [{'item_name':'Foo'}])
+
+ def test_get_timestamp(self):
+ self.assertEqual(stats.get_timestamp(), self.const_timestamp)
+
+ def test_get_datetime(self):
+ self.assertEqual(stats.get_datetime(), self.const_datetime)
+ self.assertNotEqual(stats.get_datetime(
+ (2011, 6, 22, 8, 23, 40, 2, 173, 0)), self.const_datetime)
+
+class TestCallback(unittest.TestCase):
+ def setUp(self):
+ self.dummy_func = lambda *x, **y : (x, y)
+ self.dummy_args = (1,2,3)
+ self.dummy_kwargs = {'a':1,'b':2,'c':3}
+ self.cback1 = stats.Callback(
+ command=self.dummy_func,
+ args=self.dummy_args,
+ kwargs=self.dummy_kwargs
+ )
+ self.cback2 = stats.Callback(
+ args=self.dummy_args,
+ kwargs=self.dummy_kwargs
+ )
+ self.cback3 = stats.Callback(
+ command=self.dummy_func,
+ kwargs=self.dummy_kwargs
+ )
+ self.cback4 = stats.Callback(
+ command=self.dummy_func,
+ args=self.dummy_args
+ )
+
+ def test_init(self):
+ self.assertEqual((self.cback1.command, self.cback1.args, self.cback1.kwargs),
+ (self.dummy_func, self.dummy_args, self.dummy_kwargs))
+ self.assertEqual((self.cback2.command, self.cback2.args, self.cback2.kwargs),
+ (None, self.dummy_args, self.dummy_kwargs))
+ self.assertEqual((self.cback3.command, self.cback3.args, self.cback3.kwargs),
+ (self.dummy_func, (), self.dummy_kwargs))
+ self.assertEqual((self.cback4.command, self.cback4.args, self.cback4.kwargs),
+ (self.dummy_func, self.dummy_args, {}))
+
+ def test_call(self):
+ self.assertEqual(self.cback1(), (self.dummy_args, self.dummy_kwargs))
+ self.assertEqual(self.cback1(100, 200), ((100, 200), self.dummy_kwargs))
+ self.assertEqual(self.cback1(a=100, b=200), (self.dummy_args, {'a':100, 'b':200}))
+ self.assertEqual(self.cback2(), None)
+ self.assertEqual(self.cback3(), ((), self.dummy_kwargs))
+ self.assertEqual(self.cback3(100, 200), ((100, 200), self.dummy_kwargs))
+ self.assertEqual(self.cback3(a=100, b=200), ((), {'a':100, 'b':200}))
+ self.assertEqual(self.cback4(), (self.dummy_args, {}))
+ self.assertEqual(self.cback4(100, 200), ((100, 200), {}))
+ self.assertEqual(self.cback4(a=100, b=200), (self.dummy_args, {'a':100, 'b':200}))
+class TestStats(unittest.TestCase):
def setUp(self):
- self.session = Session()
- self.subject = SessionSubject(session=self.session)
- self.listener = CCSessionListener(self.subject)
- self.module_name = self.listener.cc_session.get_module_spec().get_module_name()
- # check starting
- self.assertFalse(self.subject.running)
- self.subject.start()
- self.assertTrue(self.subject.running)
- self.assertEqual(len(self.session.message_queue), 0)
- self.assertEqual(self.module_name, 'Stats')
+ # set the signal handler for deadlock
+ self.sig_handler = SignalHandler(self.fail)
+ self.base = BaseModules()
+ self.stats = stats.Stats()
+ self.const_timestamp = 1308730448.965706
+ self.const_datetime = '2011-06-22T08:14:08Z'
+ self.const_default_datetime = '1970-01-01T00:00:00Z'
def tearDown(self):
- # check closing
- self.subject.stop()
- self.assertFalse(self.subject.running)
- self.subject.detach(self.listener)
- self.listener.stop()
+ self.base.shutdown()
+ # reset the signal handler
+ self.sig_handler.reset()
+
+ def test_init(self):
+ self.assertEqual(self.stats.module_name, 'Stats')
+ self.assertFalse(self.stats.running)
+ self.assertTrue('command_show' in self.stats.callbacks)
+ self.assertTrue('command_status' in self.stats.callbacks)
+ self.assertTrue('command_shutdown' in self.stats.callbacks)
+ self.assertTrue('command_show' in self.stats.callbacks)
+ self.assertTrue('command_showschema' in self.stats.callbacks)
+ self.assertTrue('command_set' in self.stats.callbacks)
+
+ def test_init_undefcmd(self):
+ spec_str = """\
+{
+ "module_spec": {
+ "module_name": "Stats",
+ "module_description": "Stats daemon",
+ "config_data": [],
+ "commands": [
+ {
+ "command_name": "_undef_command_",
+ "command_description": "a undefined command in stats",
+ "command_args": []
+ }
+ ],
+ "statistics": []
+ }
+}
+"""
+ orig_spec_location = stats.SPECFILE_LOCATION
+ stats.SPECFILE_LOCATION = io.StringIO(spec_str)
+ self.assertRaises(stats.StatsError, stats.Stats)
+ stats.SPECFILE_LOCATION = orig_spec_location
+
+ def test_start(self):
+ # start without err
+ self.stats_server = ThreadingServerManager(MyStats)
+ self.stats = self.stats_server.server
+ self.assertFalse(self.stats.running)
+ self.stats_server.run()
+ self.assertEqual(send_command("status", "Stats"),
+ (0, "Stats is up. (PID " + str(os.getpid()) + ")"))
+ self.assertTrue(self.stats.running)
+ self.assertEqual(send_shutdown("Stats"), (0, None))
+ self.assertFalse(self.stats.running)
+ self.stats_server.shutdown()
+
+ # start with err
+ self.stats = stats.Stats()
+ self.stats.update_statistics_data = lambda x,**y: ['an error']
+ self.assertRaises(stats.StatsError, self.stats.start)
+
+ def test_handlers(self):
+ self.stats_server = ThreadingServerManager(MyStats)
+ self.stats = self.stats_server.server
+ self.stats_server.run()
+ # config_handler
+ self.assertEqual(self.stats.config_handler({'foo':'bar'}),
+ isc.config.create_answer(0))
+
+ # command_handler
+ self.base.boss.server._started.wait()
+ self.base.boss.server._started.clear()
+ self.assertEqual(
+ send_command(
+ 'show', 'Stats',
+ params={ 'owner' : 'Boss',
+ 'name' : 'boot_time' }),
+ (0, self.const_datetime))
+ self.assertEqual(
+ send_command(
+ 'set', 'Stats',
+ params={ 'owner' : 'Boss',
+ 'data' : { 'boot_time' : self.const_datetime } }),
+ (0, None))
+ self.assertEqual(
+ send_command(
+ 'show', 'Stats',
+ params={ 'owner' : 'Boss',
+ 'name' : 'boot_time' }),
+ (0, self.const_datetime))
+ self.assertEqual(
+ send_command('status', 'Stats'),
+ (0, "Stats is up. (PID " + str(os.getpid()) + ")"))
+
+ (rcode, value) = send_command('show', 'Stats')
+ self.assertEqual(rcode, 0)
+ self.assertEqual(len(value), 3)
+ self.assertTrue('Boss' in value)
+ self.assertTrue('Stats' in value)
+ self.assertTrue('Auth' in value)
+ self.assertEqual(len(value['Stats']), 5)
+ self.assertEqual(len(value['Boss']), 1)
+ self.assertTrue('boot_time' in value['Boss'])
+ self.assertEqual(value['Boss']['boot_time'], self.const_datetime)
+ self.assertTrue('report_time' in value['Stats'])
+ self.assertTrue('boot_time' in value['Stats'])
+ self.assertTrue('last_update_time' in value['Stats'])
+ self.assertTrue('timestamp' in value['Stats'])
+ self.assertTrue('lname' in value['Stats'])
+ (rcode, value) = send_command('showschema', 'Stats')
+ self.assertEqual(rcode, 0)
+ self.assertEqual(len(value), 3)
+ self.assertTrue('Boss' in value)
+ self.assertTrue('Stats' in value)
+ self.assertTrue('Auth' in value)
+ self.assertEqual(len(value['Stats']), 5)
+ self.assertEqual(len(value['Boss']), 1)
+ for item in value['Boss']:
+ self.assertTrue(len(item) == 7)
+ self.assertTrue('item_name' in item)
+ self.assertTrue('item_type' in item)
+ self.assertTrue('item_optional' in item)
+ self.assertTrue('item_default' in item)
+ self.assertTrue('item_title' in item)
+ self.assertTrue('item_description' in item)
+ self.assertTrue('item_format' in item)
+ for item in value['Stats']:
+ self.assertTrue(len(item) == 6 or len(item) == 7)
+ self.assertTrue('item_name' in item)
+ self.assertTrue('item_type' in item)
+ self.assertTrue('item_optional' in item)
+ self.assertTrue('item_default' in item)
+ self.assertTrue('item_title' in item)
+ self.assertTrue('item_description' in item)
+ if len(item) == 7:
+ self.assertTrue('item_format' in item)
- def test_specfile(self):
+ self.assertEqual(
+ send_command('__UNKNOWN__', 'Stats'),
+ (1, "Unknown command: '__UNKNOWN__'"))
+
+ self.stats_server.shutdown()
+
+ def test_update_modules(self):
+ self.assertEqual(len(self.stats.modules), 0)
+ self.stats.update_modules()
+ self.assertTrue('Stats' in self.stats.modules)
+ self.assertTrue('Boss' in self.stats.modules)
+ self.assertFalse('Dummy' in self.stats.modules)
+ my_statistics_data = stats.get_spec_defaults(self.stats.modules['Stats'].get_statistics_spec())
+ self.assertTrue('report_time' in my_statistics_data)
+ self.assertTrue('boot_time' in my_statistics_data)
+ self.assertTrue('last_update_time' in my_statistics_data)
+ self.assertTrue('timestamp' in my_statistics_data)
+ self.assertTrue('lname' in my_statistics_data)
+ self.assertEqual(my_statistics_data['report_time'], self.const_default_datetime)
+ self.assertEqual(my_statistics_data['boot_time'], self.const_default_datetime)
+ self.assertEqual(my_statistics_data['last_update_time'], self.const_default_datetime)
+ self.assertEqual(my_statistics_data['timestamp'], 0.0)
+ self.assertEqual(my_statistics_data['lname'], "")
+ my_statistics_data = stats.get_spec_defaults(self.stats.modules['Boss'].get_statistics_spec())
+ self.assertTrue('boot_time' in my_statistics_data)
+ self.assertEqual(my_statistics_data['boot_time'], self.const_default_datetime)
+ orig_parse_answer = stats.isc.config.ccsession.parse_answer
+ stats.isc.config.ccsession.parse_answer = lambda x: (99, 'error')
+ self.assertRaises(stats.StatsError, self.stats.update_modules)
+ stats.isc.config.ccsession.parse_answer = orig_parse_answer
+
+ def test_get_statistics_data(self):
+ my_statistics_data = self.stats.get_statistics_data()
+ self.assertTrue('Stats' in my_statistics_data)
+ self.assertTrue('Boss' in my_statistics_data)
+ my_statistics_data = self.stats.get_statistics_data(owner='Stats')
+ self.assertTrue('report_time' in my_statistics_data)
+ self.assertTrue('boot_time' in my_statistics_data)
+ self.assertTrue('last_update_time' in my_statistics_data)
+ self.assertTrue('timestamp' in my_statistics_data)
+ self.assertTrue('lname' in my_statistics_data)
+ self.assertRaises(stats.StatsError, self.stats.get_statistics_data, owner='Foo')
+ my_statistics_data = self.stats.get_statistics_data(owner='Stats')
+ self.assertTrue('boot_time' in my_statistics_data)
+ my_statistics_data = self.stats.get_statistics_data(owner='Stats', name='report_time')
+ self.assertEqual(my_statistics_data, self.const_default_datetime)
+ my_statistics_data = self.stats.get_statistics_data(owner='Stats', name='boot_time')
+ self.assertEqual(my_statistics_data, self.const_default_datetime)
+ my_statistics_data = self.stats.get_statistics_data(owner='Stats', name='last_update_time')
+ self.assertEqual(my_statistics_data, self.const_default_datetime)
+ my_statistics_data = self.stats.get_statistics_data(owner='Stats', name='timestamp')
+ self.assertEqual(my_statistics_data, 0.0)
+ my_statistics_data = self.stats.get_statistics_data(owner='Stats', name='lname')
+ self.assertEqual(my_statistics_data, '')
+ self.assertRaises(stats.StatsError, self.stats.get_statistics_data,
+ owner='Stats', name='Bar')
+ self.assertRaises(stats.StatsError, self.stats.get_statistics_data,
+ owner='Foo', name='Bar')
+ self.assertRaises(stats.StatsError, self.stats.get_statistics_data,
+ name='Bar')
+
+ def test_update_statistics_data(self):
+ self.stats.update_statistics_data(owner='Stats', lname='foo@bar')
+ self.assertTrue('Stats' in self.stats.statistics_data)
+ my_statistics_data = self.stats.statistics_data['Stats']
+ self.assertEqual(my_statistics_data['lname'], 'foo@bar')
+ self.stats.update_statistics_data(owner='Stats', last_update_time=self.const_datetime)
+ self.assertTrue('Stats' in self.stats.statistics_data)
+ my_statistics_data = self.stats.statistics_data['Stats']
+ self.assertEqual(my_statistics_data['last_update_time'], self.const_datetime)
+ self.assertEqual(self.stats.update_statistics_data(owner='Stats', lname=0.0),
+ ['0.0 should be a string'])
+ self.assertEqual(self.stats.update_statistics_data(owner='Dummy', foo='bar'),
+ ['unknown module name: Dummy'])
+
+ def test_commands(self):
+ # status
+ self.assertEqual(self.stats.command_status(),
+ isc.config.create_answer(
+ 0, "Stats is up. (PID " + str(os.getpid()) + ")"))
+
+ # shutdown
+ self.stats.running = True
+ self.assertEqual(self.stats.command_shutdown(),
+ isc.config.create_answer(0))
+ self.assertFalse(self.stats.running)
+
+ def test_command_show(self):
+ self.assertEqual(self.stats.command_show(owner='Foo', name=None),
+ isc.config.create_answer(
+ 1, "specified arguments are incorrect: owner: Foo, name: None"))
+ self.assertEqual(self.stats.command_show(owner='Foo', name='_bar_'),
+ isc.config.create_answer(
+ 1, "specified arguments are incorrect: owner: Foo, name: _bar_"))
+ self.assertEqual(self.stats.command_show(owner='Foo', name='bar'),
+ isc.config.create_answer(
+ 1, "specified arguments are incorrect: owner: Foo, name: bar"))
+ self.assertEqual(self.stats.command_show(owner='Auth'),
+ isc.config.create_answer(
+ 0, {'queries.tcp': 0, 'queries.udp': 0}))
+ self.assertEqual(self.stats.command_show(owner='Auth', name='queries.udp'),
+ isc.config.create_answer(
+ 0, 0))
+ orig_get_timestamp = stats.get_timestamp
+ orig_get_datetime = stats.get_datetime
+ stats.get_timestamp = lambda : self.const_timestamp
+ stats.get_datetime = lambda : self.const_datetime
+ self.assertEqual(stats.get_timestamp(), self.const_timestamp)
+ self.assertEqual(stats.get_datetime(), self.const_datetime)
+ self.assertEqual(self.stats.command_show(owner='Stats', name='report_time'), \
+ isc.config.create_answer(0, self.const_datetime))
+ self.assertEqual(self.stats.statistics_data['Stats']['timestamp'], self.const_timestamp)
+ self.assertEqual(self.stats.statistics_data['Stats']['boot_time'], self.const_default_datetime)
+ stats.get_timestamp = orig_get_timestamp
+ stats.get_datetime = orig_get_datetime
+ self.stats.mccs.specification = isc.config.module_spec.ModuleSpec(
+ { "module_name": self.stats.module_name,
+ "statistics": [] } )
+ self.assertRaises(
+ stats.StatsError, self.stats.command_show, owner='Foo', name='bar')
+
+ def test_command_showchema(self):
+ (rcode, value) = isc.config.ccsession.parse_answer(
+ self.stats.command_showschema())
+ self.assertEqual(rcode, 0)
+ self.assertEqual(len(value), 3)
+ self.assertTrue('Stats' in value)
+ self.assertTrue('Boss' in value)
+ self.assertTrue('Auth' in value)
+ self.assertFalse('__Dummy__' in value)
+ schema = value['Stats']
+ self.assertEqual(len(schema), 5)
+ for item in schema:
+ self.assertTrue(len(item) == 6 or len(item) == 7)
+ self.assertTrue('item_name' in item)
+ self.assertTrue('item_type' in item)
+ self.assertTrue('item_optional' in item)
+ self.assertTrue('item_default' in item)
+ self.assertTrue('item_title' in item)
+ self.assertTrue('item_description' in item)
+ if len(item) == 7:
+ self.assertTrue('item_format' in item)
+
+ schema = value['Boss']
+ self.assertEqual(len(schema), 1)
+ for item in schema:
+ self.assertTrue(len(item) == 7)
+ self.assertTrue('item_name' in item)
+ self.assertTrue('item_type' in item)
+ self.assertTrue('item_optional' in item)
+ self.assertTrue('item_default' in item)
+ self.assertTrue('item_title' in item)
+ self.assertTrue('item_description' in item)
+ self.assertTrue('item_format' in item)
+
+ schema = value['Auth']
+ self.assertEqual(len(schema), 2)
+ for item in schema:
+ self.assertTrue(len(item) == 6)
+ self.assertTrue('item_name' in item)
+ self.assertTrue('item_type' in item)
+ self.assertTrue('item_optional' in item)
+ self.assertTrue('item_default' in item)
+ self.assertTrue('item_title' in item)
+ self.assertTrue('item_description' in item)
+
+ (rcode, value) = isc.config.ccsession.parse_answer(
+ self.stats.command_showschema(owner='Stats'))
+ self.assertEqual(rcode, 0)
+ self.assertFalse('Stats' in value)
+ self.assertFalse('Boss' in value)
+ self.assertFalse('Auth' in value)
+ for item in value:
+ self.assertTrue(len(item) == 6 or len(item) == 7)
+ self.assertTrue('item_name' in item)
+ self.assertTrue('item_type' in item)
+ self.assertTrue('item_optional' in item)
+ self.assertTrue('item_default' in item)
+ self.assertTrue('item_title' in item)
+ self.assertTrue('item_description' in item)
+ if len(item) == 7:
+ self.assertTrue('item_format' in item)
+
+ (rcode, value) = isc.config.ccsession.parse_answer(
+ self.stats.command_showschema(owner='Stats', name='report_time'))
+ self.assertEqual(rcode, 0)
+ self.assertFalse('Stats' in value)
+ self.assertFalse('Boss' in value)
+ self.assertFalse('Auth' in value)
+ self.assertTrue(len(value) == 7)
+ self.assertTrue('item_name' in value)
+ self.assertTrue('item_type' in value)
+ self.assertTrue('item_optional' in value)
+ self.assertTrue('item_default' in value)
+ self.assertTrue('item_title' in value)
+ self.assertTrue('item_description' in value)
+ self.assertTrue('item_format' in value)
+ self.assertEqual(value['item_name'], 'report_time')
+ self.assertEqual(value['item_format'], 'date-time')
+
+ self.assertEqual(self.stats.command_showschema(owner='Foo'),
+ isc.config.create_answer(
+ 1, "specified arguments are incorrect: owner: Foo, name: None"))
+ self.assertEqual(self.stats.command_showschema(owner='Foo', name='bar'),
+ isc.config.create_answer(
+ 1, "specified arguments are incorrect: owner: Foo, name: bar"))
+ self.assertEqual(self.stats.command_showschema(owner='Auth'),
+ isc.config.create_answer(
+ 0, [{
+ "item_default": 0,
+ "item_description": "A number of total query counts which all auth servers receive over TCP since they started initially",
+ "item_name": "queries.tcp",
+ "item_optional": False,
+ "item_title": "Queries TCP",
+ "item_type": "integer"
+ },
+ {
+ "item_default": 0,
+ "item_description": "A number of total query counts which all auth servers receive over UDP since they started initially",
+ "item_name": "queries.udp",
+ "item_optional": False,
+ "item_title": "Queries UDP",
+ "item_type": "integer"
+ }]))
+ self.assertEqual(self.stats.command_showschema(owner='Auth', name='queries.tcp'),
+ isc.config.create_answer(
+ 0, {
+ "item_default": 0,
+ "item_description": "A number of total query counts which all auth servers receive over TCP since they started initially",
+ "item_name": "queries.tcp",
+ "item_optional": False,
+ "item_title": "Queries TCP",
+ "item_type": "integer"
+ }))
+
+ self.assertEqual(self.stats.command_showschema(owner='Stats', name='bar'),
+ isc.config.create_answer(
+ 1, "specified arguments are incorrect: owner: Stats, name: bar"))
+ self.assertEqual(self.stats.command_showschema(name='bar'),
+ isc.config.create_answer(
+ 1, "module name is not specified"))
+
+ def test_command_set(self):
+ orig_get_datetime = stats.get_datetime
+ stats.get_datetime = lambda : self.const_datetime
+ (rcode, value) = isc.config.ccsession.parse_answer(
+ self.stats.command_set(owner='Boss',
+ data={ 'boot_time' : self.const_datetime }))
+ stats.get_datetime = orig_get_datetime
+ self.assertEqual(rcode, 0)
+ self.assertTrue(value is None)
+ self.assertEqual(self.stats.statistics_data['Boss']['boot_time'],
+ self.const_datetime)
+ self.assertEqual(self.stats.statistics_data['Stats']['last_update_time'],
+ self.const_datetime)
+ self.assertEqual(self.stats.command_set(owner='Stats',
+ data={ 'lname' : 'foo@bar' }),
+ isc.config.create_answer(0, None))
+ self.stats.statistics_data['Stats'] = {}
+ self.stats.mccs.specification = isc.config.module_spec.ModuleSpec(
+ { "module_name": self.stats.module_name,
+ "statistics": [] } )
+ self.assertEqual(self.stats.command_set(owner='Stats',
+ data={ 'lname' : '_foo_@_bar_' }),
+ isc.config.create_answer(
+ 1,
+ "errors while setting statistics data: unknown item lname"))
+ self.stats.statistics_data['Stats'] = {}
+ self.stats.mccs.specification = isc.config.module_spec.ModuleSpec(
+ { "module_name": self.stats.module_name } )
+ self.assertEqual(self.stats.command_set(owner='Stats',
+ data={ 'lname' : '_foo_@_bar_' }),
+ isc.config.create_answer(
+ 1,
+ "errors while setting statistics data: No statistics specification"))
+ self.stats.statistics_data['Stats'] = {}
+ self.stats.mccs.specification = isc.config.module_spec.ModuleSpec(
+ { "module_name": self.stats.module_name,
+ "statistics": [
+ {
+ "item_name": "dummy",
+ "item_type": "string",
+ "item_optional": False,
+ "item_default": "",
+ "item_title": "Local Name",
+ "item_description": "brabra"
+ } ] } )
+ self.assertRaises(stats.StatsError,
+ self.stats.command_set, owner='Stats', data={ 'dummy' : '_xxxx_yyyy_zzz_' })
+
+class TestOSEnv(unittest.TestCase):
+ def test_osenv(self):
"""
- Test for specfile
-
+ test for the environ variable "B10_FROM_SOURCE"
+ "B10_FROM_SOURCE" is set in Makefile
"""
- if "B10_FROM_SOURCE" in os.environ:
- self.assertEqual(stats.SPECFILE_LOCATION,
+ # test case having B10_FROM_SOURCE
+ self.assertTrue("B10_FROM_SOURCE" in os.environ)
+ self.assertEqual(stats.SPECFILE_LOCATION, \
os.environ["B10_FROM_SOURCE"] + os.sep + \
- "src" + os.sep + "bin" + os.sep + "stats" + \
- os.sep + "stats.spec")
- self.assertEqual(stats.SCHEMA_SPECFILE_LOCATION,
- os.environ["B10_FROM_SOURCE"] + os.sep + \
- "src" + os.sep + "bin" + os.sep + "stats" + \
- os.sep + "stats-schema.spec")
+ "src" + os.sep + "bin" + os.sep + "stats" + \
+ os.sep + "stats.spec")
+ # test case not having B10_FROM_SOURCE
+ path = os.environ["B10_FROM_SOURCE"]
+ os.environ.pop("B10_FROM_SOURCE")
+ self.assertFalse("B10_FROM_SOURCE" in os.environ)
+ # import stats again
+ imp.reload(stats)
+ # revert the changes
+ os.environ["B10_FROM_SOURCE"] = path
imp.reload(stats)
- # change path of SPECFILE_LOCATION
- stats.SPECFILE_LOCATION = TEST_SPECFILE_LOCATION
- stats.SCHEMA_SPECFILE_LOCATION = TEST_SPECFILE_LOCATION
- self.assertEqual(stats.SPECFILE_LOCATION, TEST_SPECFILE_LOCATION)
- self.subject = stats.SessionSubject(session=self.session)
- self.session = self.subject.session
- self.listener = stats.CCSessionListener(self.subject)
-
- self.assertEqual(self.listener.stats_spec, [])
- self.assertEqual(self.listener.stats_data, {})
-
- self.assertEqual(self.listener.commands_spec, [
- {
- "command_name": "status",
- "command_description": "identify whether stats module is alive or not",
- "command_args": []
- },
- {
- "command_name": "the_dummy",
- "command_description": "this is for testing",
- "command_args": []
- }])
-
- def test_func_initialize_data(self):
- """
- Test for initialize_data function
-
- """
- # prepare for sample data set
- stats_spec = [
- {
- "item_name": "none_sample",
- "item_type": "null",
- "item_default": "None"
- },
- {
- "item_name": "boolean_sample",
- "item_type": "boolean",
- "item_default": True
- },
- {
- "item_name": "string_sample",
- "item_type": "string",
- "item_default": "A something"
- },
- {
- "item_name": "int_sample",
- "item_type": "integer",
- "item_default": 9999999
- },
- {
- "item_name": "real_sample",
- "item_type": "real",
- "item_default": 0.0009
- },
- {
- "item_name": "list_sample",
- "item_type": "list",
- "item_default": [0, 1, 2, 3, 4],
- "list_item_spec": []
- },
- {
- "item_name": "map_sample",
- "item_type": "map",
- "item_default": {'name':'value'},
- "map_item_spec": []
- },
- {
- "item_name": "other_sample",
- "item_type": "__unknown__",
- "item_default": "__unknown__"
- }
- ]
- # data for comparison
- stats_data = {
- 'none_sample': None,
- 'boolean_sample': True,
- 'string_sample': 'A something',
- 'int_sample': 9999999,
- 'real_sample': 0.0009,
- 'list_sample': [0, 1, 2, 3, 4],
- 'map_sample': {'name':'value'},
- 'other_sample': '__unknown__'
- }
- self.assertEqual(self.listener.initialize_data(stats_spec), stats_data)
-
- def test_func_main(self):
- # explicitly make failed
- self.session.close()
- stats.main(session=self.session)
- def test_osenv(self):
- """
- test for not having environ "B10_FROM_SOURCE"
- """
- if "B10_FROM_SOURCE" in os.environ:
- path = os.environ["B10_FROM_SOURCE"]
- os.environ.pop("B10_FROM_SOURCE")
- imp.reload(stats)
- os.environ["B10_FROM_SOURCE"] = path
- imp.reload(stats)
-
-def result_ok(*args):
- if args:
- return { 'result': list(args) }
- else:
- return { 'result': [ 0 ] }
+def test_main():
+ unittest.main()
if __name__ == "__main__":
- unittest.main()
+ test_main()
diff --git a/src/bin/stats/tests/fake_select.py b/src/bin/stats/tests/fake_select.py
deleted file mode 100644
index ca0ca82..0000000
--- a/src/bin/stats/tests/fake_select.py
+++ /dev/null
@@ -1,43 +0,0 @@
-# Copyright (C) 2011 Internet Systems Consortium.
-#
-# Permission to use, copy, modify, and distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
-# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
-# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
-# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
-# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
-# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
-# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
-"""
-A mock-up module of select
-
-*** NOTE ***
-It is only for testing stats_httpd module and not reusable for
-external module.
-"""
-
-import fake_socket
-import errno
-
-class error(Exception):
- pass
-
-def select(rlst, wlst, xlst, timeout):
- if type(timeout) != int and type(timeout) != float:
- raise TypeError("Error: %s must be integer or float"
- % timeout.__class__.__name__)
- for s in rlst + wlst + xlst:
- if type(s) != fake_socket.socket:
- raise TypeError("Error: %s must be a dummy socket"
- % s.__class__.__name__)
- s._called = s._called + 1
- if s._called > 3:
- raise error("Something is happened!")
- elif s._called > 2:
- raise error(errno.EINTR)
- return (rlst, wlst, xlst)
diff --git a/src/bin/stats/tests/fake_socket.py b/src/bin/stats/tests/fake_socket.py
deleted file mode 100644
index 4e3a458..0000000
--- a/src/bin/stats/tests/fake_socket.py
+++ /dev/null
@@ -1,70 +0,0 @@
-# Copyright (C) 2011 Internet Systems Consortium.
-#
-# Permission to use, copy, modify, and distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
-# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
-# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
-# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
-# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
-# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
-# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
-"""
-A mock-up module of socket
-
-*** NOTE ***
-It is only for testing stats_httpd module and not reusable for
-external module.
-"""
-
-import re
-
-AF_INET = 'AF_INET'
-AF_INET6 = 'AF_INET6'
-_ADDRFAMILY = AF_INET
-has_ipv6 = True
-_CLOSED = False
-
-class gaierror(Exception):
- pass
-
-class error(Exception):
- pass
-
-class socket:
-
- def __init__(self, family=None):
- if family is None:
- self.address_family = _ADDRFAMILY
- else:
- self.address_family = family
- self._closed = _CLOSED
- if self._closed:
- raise error('socket is already closed!')
- self._called = 0
-
- def close(self):
- self._closed = True
-
- def fileno(self):
- return id(self)
-
- def bind(self, server_class):
- (self.server_address, self.server_port) = server_class
- if self.address_family not in set([AF_INET, AF_INET6]):
- raise error("Address family not supported by protocol: %s" % self.address_family)
- if self.address_family == AF_INET6 and not has_ipv6:
- raise error("Address family not supported in this machine: %s has_ipv6: %s"
- % (self.address_family, str(has_ipv6)))
- if self.address_family == AF_INET and re.search(':', self.server_address) is not None:
- raise gaierror("Address family for hostname not supported : %s %s" % (self.server_address, self.address_family))
- if self.address_family == AF_INET6 and re.search(':', self.server_address) is None:
- raise error("Cannot assign requested address : %s" % str(self.server_address))
- if type(self.server_port) is not int:
- raise TypeError("an integer is required: %s" % str(self.server_port))
- if self.server_port < 0 or self.server_port > 65535:
- raise OverflowError("port number must be 0-65535.: %s" % str(self.server_port))
diff --git a/src/bin/stats/tests/fake_time.py b/src/bin/stats/tests/fake_time.py
deleted file mode 100644
index 65e0237..0000000
--- a/src/bin/stats/tests/fake_time.py
+++ /dev/null
@@ -1,47 +0,0 @@
-# Copyright (C) 2010 Internet Systems Consortium.
-#
-# Permission to use, copy, modify, and distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
-# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
-# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
-# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
-# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
-# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
-# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
-__version__ = "$Revision$"
-
-# This is a dummy time class against a Python standard time class.
-# It is just testing use only.
-# Other methods which time class has is not implemented.
-# (This class isn't orderloaded for time class.)
-
-# These variables are constant. These are example.
-_TEST_TIME_SECS = 1283364938.229088
-_TEST_TIME_STRF = '2010-09-01T18:15:38Z'
-
-def time():
- """
- This is a dummy time() method against time.time()
- """
- # return float constant value
- return _TEST_TIME_SECS
-
-def gmtime():
- """
- This is a dummy gmtime() method against time.gmtime()
- """
- # always return nothing
- return None
-
-def strftime(*arg):
- """
- This is a dummy gmtime() method against time.gmtime()
- """
- return _TEST_TIME_STRF
-
-
diff --git a/src/bin/stats/tests/http/Makefile.am b/src/bin/stats/tests/http/Makefile.am
deleted file mode 100644
index 79263a9..0000000
--- a/src/bin/stats/tests/http/Makefile.am
+++ /dev/null
@@ -1,6 +0,0 @@
-EXTRA_DIST = __init__.py server.py
-CLEANFILES = __init__.pyc server.pyc
-CLEANDIRS = __pycache__
-
-clean-local:
- rm -rf $(CLEANDIRS)
diff --git a/src/bin/stats/tests/http/__init__.py b/src/bin/stats/tests/http/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/src/bin/stats/tests/http/server.py b/src/bin/stats/tests/http/server.py
deleted file mode 100644
index 70ed6fa..0000000
--- a/src/bin/stats/tests/http/server.py
+++ /dev/null
@@ -1,96 +0,0 @@
-# Copyright (C) 2011 Internet Systems Consortium.
-#
-# Permission to use, copy, modify, and distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
-# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
-# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
-# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
-# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
-# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
-# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
-"""
-A mock-up module of http.server
-
-*** NOTE ***
-It is only for testing stats_httpd module and not reusable for
-external module.
-"""
-
-import fake_socket
-
-class DummyHttpResponse:
- def __init__(self, path):
- self.path = path
- self.headers={}
- self.log = ""
-
- def _write_log(self, msg):
- self.log = self.log + msg
-
-class HTTPServer:
- """
- A mock-up class of http.server.HTTPServer
- """
- address_family = fake_socket.AF_INET
- def __init__(self, server_class, handler_class):
- self.socket = fake_socket.socket(self.address_family)
- self.server_class = server_class
- self.socket.bind(self.server_class)
- self._handler = handler_class(None, None, self)
-
- def handle_request(self):
- pass
-
- def server_close(self):
- self.socket.close()
-
-class BaseHTTPRequestHandler:
- """
- A mock-up class of http.server.BaseHTTPRequestHandler
- """
-
- def __init__(self, request, client_address, server):
- self.path = "/path/to"
- self.headers = {}
- self.server = server
- self.response = DummyHttpResponse(path=self.path)
- self.response.write = self._write
- self.wfile = self.response
-
- def send_response(self, code=0):
- if self.path != self.response.path:
- self.response = DummyHttpResponse(path=self.path)
- self.response.code = code
-
- def send_header(self, key, value):
- if self.path != self.response.path:
- self.response = DummyHttpResponse(path=self.path)
- self.response.headers[key] = value
-
- def end_headers(self):
- if self.path != self.response.path:
- self.response = DummyHttpResponse(path=self.path)
- self.response.wrote_headers = True
-
- def send_error(self, code, message=None):
- if self.path != self.response.path:
- self.response = DummyHttpResponse(path=self.path)
- self.response.code = code
- self.response.body = message
-
- def address_string(self):
- return 'dummyhost'
-
- def log_date_time_string(self):
- return '[DD/MM/YYYY HH:MI:SS]'
-
- def _write(self, obj):
- if self.path != self.response.path:
- self.response = DummyHttpResponse(path=self.path)
- self.response.body = obj.decode()
-
diff --git a/src/bin/stats/tests/isc/Makefile.am b/src/bin/stats/tests/isc/Makefile.am
deleted file mode 100644
index d31395d..0000000
--- a/src/bin/stats/tests/isc/Makefile.am
+++ /dev/null
@@ -1,8 +0,0 @@
-SUBDIRS = cc config util log
-EXTRA_DIST = __init__.py
-CLEANFILES = __init__.pyc
-
-CLEANDIRS = __pycache__
-
-clean-local:
- rm -rf $(CLEANDIRS)
diff --git a/src/bin/stats/tests/isc/__init__.py b/src/bin/stats/tests/isc/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/src/bin/stats/tests/isc/cc/Makefile.am b/src/bin/stats/tests/isc/cc/Makefile.am
deleted file mode 100644
index 67323b5..0000000
--- a/src/bin/stats/tests/isc/cc/Makefile.am
+++ /dev/null
@@ -1,7 +0,0 @@
-EXTRA_DIST = __init__.py session.py
-CLEANFILES = __init__.pyc session.pyc
-
-CLEANDIRS = __pycache__
-
-clean-local:
- rm -rf $(CLEANDIRS)
diff --git a/src/bin/stats/tests/isc/cc/__init__.py b/src/bin/stats/tests/isc/cc/__init__.py
deleted file mode 100644
index 9a3eaf6..0000000
--- a/src/bin/stats/tests/isc/cc/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-from isc.cc.session import *
diff --git a/src/bin/stats/tests/isc/cc/session.py b/src/bin/stats/tests/isc/cc/session.py
deleted file mode 100644
index e16d6a9..0000000
--- a/src/bin/stats/tests/isc/cc/session.py
+++ /dev/null
@@ -1,148 +0,0 @@
-# Copyright (C) 2010,2011 Internet Systems Consortium.
-#
-# Permission to use, copy, modify, and distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
-# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
-# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
-# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
-# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
-# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
-# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
-"""
-A mock-up module of isc.cc.session
-
-*** NOTE ***
-It is only for testing stats_httpd module and not reusable for
-external module.
-"""
-
-import sys
-import fake_socket
-
-# set a dummy lname
-_TEST_LNAME = '123abc at xxxx'
-
-class Queue():
- def __init__(self, msg=None, env={}):
- self.msg = msg
- self.env = env
-
- def dump(self):
- return { 'msg': self.msg, 'env': self.env }
-
-class SessionError(Exception):
- pass
-
-class SessionTimeout(Exception):
- pass
-
-class Session:
- def __init__(self, socket_file=None, verbose=False):
- self._lname = _TEST_LNAME
- self.message_queue = []
- self.old_message_queue = []
- try:
- self._socket = fake_socket.socket()
- except fake_socket.error as se:
- raise SessionError(se)
- self.verbose = verbose
-
- @property
- def lname(self):
- return self._lname
-
- def close(self):
- self._socket.close()
-
- def _clear_queues(self):
- while len(self.message_queue) > 0:
- self.dequeue()
-
- def _next_sequence(self, que=None):
- return len(self.message_queue)
-
- def enqueue(self, msg=None, env={}):
- if self._socket._closed:
- raise SessionError("Session has been closed.")
- seq = self._next_sequence()
- env.update({"seq": 0}) # fixed here
- que = Queue(msg=msg, env=env)
- self.message_queue.append(que)
- if self.verbose:
- sys.stdout.write("[Session] enqueue: " + str(que.dump()) + "\n")
- return seq
-
- def dequeue(self):
- if self._socket._closed:
- raise SessionError("Session has been closed.")
- que = None
- try:
- que = self.message_queue.pop(0) # always pop at index 0
- self.old_message_queue.append(que)
- except IndexError:
- que = Queue()
- if self.verbose:
- sys.stdout.write("[Session] dequeue: " + str(que.dump()) + "\n")
- return que
-
- def get_queue(self, seq=None):
- if self._socket._closed:
- raise SessionError("Session has been closed.")
- if seq is None:
- seq = len(self.message_queue) - 1
- que = None
- try:
- que = self.message_queue[seq]
- except IndexError:
- raise IndexError
- que = Queue()
- if self.verbose:
- sys.stdout.write("[Session] get_queue: " + str(que.dump()) + "\n")
- return que
-
- def group_sendmsg(self, msg, group, instance="*", to="*"):
- return self.enqueue(msg=msg, env={
- "type": "send",
- "from": self._lname,
- "to": to,
- "group": group,
- "instance": instance })
-
- def group_recvmsg(self, nonblock=True, seq=0):
- que = self.dequeue()
- return que.msg, que.env
-
- def group_reply(self, routing, msg):
- return self.enqueue(msg=msg, env={
- "type": "send",
- "from": self._lname,
- "to": routing["from"],
- "group": routing["group"],
- "instance": routing["instance"],
- "reply": routing["seq"] })
-
- def get_message(self, group, to='*'):
- if self._socket._closed:
- raise SessionError("Session has been closed.")
- que = Queue()
- for q in self.message_queue:
- if q.env['group'] == group:
- self.message_queue.remove(q)
- self.old_message_queue.append(q)
- que = q
- if self.verbose:
- sys.stdout.write("[Session] get_message: " + str(que.dump()) + "\n")
- return q.msg
-
- def group_subscribe(self, group, instance = "*"):
- if self._socket._closed:
- raise SessionError("Session has been closed.")
-
- def group_unsubscribe(self, group, instance = "*"):
- if self._socket._closed:
- raise SessionError("Session has been closed.")
diff --git a/src/bin/stats/tests/isc/config/Makefile.am b/src/bin/stats/tests/isc/config/Makefile.am
deleted file mode 100644
index ffbecda..0000000
--- a/src/bin/stats/tests/isc/config/Makefile.am
+++ /dev/null
@@ -1,7 +0,0 @@
-EXTRA_DIST = __init__.py ccsession.py
-CLEANFILES = __init__.pyc ccsession.pyc
-
-CLEANDIRS = __pycache__
-
-clean-local:
- rm -rf $(CLEANDIRS)
diff --git a/src/bin/stats/tests/isc/config/__init__.py b/src/bin/stats/tests/isc/config/__init__.py
deleted file mode 100644
index 4c49e95..0000000
--- a/src/bin/stats/tests/isc/config/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-from isc.config.ccsession import *
diff --git a/src/bin/stats/tests/isc/config/ccsession.py b/src/bin/stats/tests/isc/config/ccsession.py
deleted file mode 100644
index 50f7c1b..0000000
--- a/src/bin/stats/tests/isc/config/ccsession.py
+++ /dev/null
@@ -1,249 +0,0 @@
-# Copyright (C) 2010,2011 Internet Systems Consortium.
-#
-# Permission to use, copy, modify, and distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
-# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
-# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
-# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
-# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
-# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
-# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
-"""
-A mock-up module of isc.cc.session
-
-*** NOTE ***
-It is only for testing stats_httpd module and not reusable for
-external module.
-"""
-
-import json
-import os
-import time
-from isc.cc.session import Session
-
-COMMAND_CONFIG_UPDATE = "config_update"
-
-def parse_answer(msg):
- assert 'result' in msg
- try:
- return msg['result'][0], msg['result'][1]
- except IndexError:
- return msg['result'][0], None
-
-def create_answer(rcode, arg = None):
- if arg is None:
- return { 'result': [ rcode ] }
- else:
- return { 'result': [ rcode, arg ] }
-
-def parse_command(msg):
- assert 'command' in msg
- try:
- return msg['command'][0], msg['command'][1]
- except IndexError:
- return msg['command'][0], None
-
-def create_command(command_name, params = None):
- if params is None:
- return {"command": [command_name]}
- else:
- return {"command": [command_name, params]}
-
-def module_spec_from_file(spec_file, check = True):
- try:
- file = open(spec_file)
- json_str = file.read()
- module_spec = json.loads(json_str)
- file.close()
- return ModuleSpec(module_spec['module_spec'], check)
- except IOError as ioe:
- raise ModuleSpecError("JSON read error: " + str(ioe))
- except ValueError as ve:
- raise ModuleSpecError("JSON parse error: " + str(ve))
- except KeyError as err:
- raise ModuleSpecError("Data definition has no module_spec element")
-
-class ModuleSpecError(Exception):
- pass
-
-class ModuleSpec:
- def __init__(self, module_spec, check = True):
- # check only confi_data for testing
- if check and "config_data" in module_spec:
- _check_config_spec(module_spec["config_data"])
- self._module_spec = module_spec
-
- def get_config_spec(self):
- return self._module_spec['config_data']
-
- def get_commands_spec(self):
- return self._module_spec['commands']
-
- def get_module_name(self):
- return self._module_spec['module_name']
-
-def _check_config_spec(config_data):
- # config data is a list of items represented by dicts that contain
- # things like "item_name", depending on the type they can have
- # specific subitems
- """Checks a list that contains the configuration part of the
- specification. Raises a ModuleSpecError if there is a
- problem."""
- if type(config_data) != list:
- raise ModuleSpecError("config_data is of type " + str(type(config_data)) + ", not a list of items")
- for config_item in config_data:
- _check_item_spec(config_item)
-
-def _check_item_spec(config_item):
- """Checks the dict that defines one config item
- (i.e. containing "item_name", "item_type", etc.
- Raises a ModuleSpecError if there is an error"""
- if type(config_item) != dict:
- raise ModuleSpecError("item spec not a dict")
- if "item_name" not in config_item:
- raise ModuleSpecError("no item_name in config item")
- if type(config_item["item_name"]) != str:
- raise ModuleSpecError("item_name is not a string: " + str(config_item["item_name"]))
- item_name = config_item["item_name"]
- if "item_type" not in config_item:
- raise ModuleSpecError("no item_type in config item")
- item_type = config_item["item_type"]
- if type(item_type) != str:
- raise ModuleSpecError("item_type in " + item_name + " is not a string: " + str(type(item_type)))
- if item_type not in ["integer", "real", "boolean", "string", "list", "map", "any"]:
- raise ModuleSpecError("unknown item_type in " + item_name + ": " + item_type)
- if "item_optional" in config_item:
- if type(config_item["item_optional"]) != bool:
- raise ModuleSpecError("item_default in " + item_name + " is not a boolean")
- if not config_item["item_optional"] and "item_default" not in config_item:
- raise ModuleSpecError("no default value for non-optional item " + item_name)
- else:
- raise ModuleSpecError("item_optional not in item " + item_name)
- if "item_default" in config_item:
- item_default = config_item["item_default"]
- if (item_type == "integer" and type(item_default) != int) or \
- (item_type == "real" and type(item_default) != float) or \
- (item_type == "boolean" and type(item_default) != bool) or \
- (item_type == "string" and type(item_default) != str) or \
- (item_type == "list" and type(item_default) != list) or \
- (item_type == "map" and type(item_default) != dict):
- raise ModuleSpecError("Wrong type for item_default in " + item_name)
- # TODO: once we have check_type, run the item default through that with the list|map_item_spec
- if item_type == "list":
- if "list_item_spec" not in config_item:
- raise ModuleSpecError("no list_item_spec in list item " + item_name)
- if type(config_item["list_item_spec"]) != dict:
- raise ModuleSpecError("list_item_spec in " + item_name + " is not a dict")
- _check_item_spec(config_item["list_item_spec"])
- if item_type == "map":
- if "map_item_spec" not in config_item:
- raise ModuleSpecError("no map_item_sepc in map item " + item_name)
- if type(config_item["map_item_spec"]) != list:
- raise ModuleSpecError("map_item_spec in " + item_name + " is not a list")
- for map_item in config_item["map_item_spec"]:
- if type(map_item) != dict:
- raise ModuleSpecError("map_item_spec element is not a dict")
- _check_item_spec(map_item)
- if 'item_format' in config_item and 'item_default' in config_item:
- item_format = config_item["item_format"]
- item_default = config_item["item_default"]
- if not _check_format(item_default, item_format):
- raise ModuleSpecError(
- "Wrong format for " + str(item_default) + " in " + str(item_name))
-
-def _check_format(value, format_name):
- """Check if specified value and format are correct. Return True if
- is is correct."""
- # TODO: should be added other format types if necessary
- time_formats = { 'date-time' : "%Y-%m-%dT%H:%M:%SZ",
- 'date' : "%Y-%m-%d",
- 'time' : "%H:%M:%S" }
- for fmt in time_formats:
- if format_name == fmt:
- try:
- time.strptime(value, time_formats[fmt])
- return True
- except (ValueError, TypeError):
- break
- return False
-
-class ModuleCCSessionError(Exception):
- pass
-
-class DataNotFoundError(Exception):
- pass
-
-class ConfigData:
- def __init__(self, specification):
- self.specification = specification
-
- def get_value(self, identifier):
- """Returns a tuple where the first item is the value at the
- given identifier, and the second item is absolutely False
- even if the value is an unset default or not. Raises an
- DataNotFoundError if the identifier is not found in the
- specification file.
- *** NOTE ***
- There are some differences from the original method. This
- method never handles local settings like the original
- method. But these different behaviors aren't so big issues
- for a mock-up method of stats_httpd because stats_httpd
- calls this method at only first."""
- for config_map in self.get_module_spec().get_config_spec():
- if config_map['item_name'] == identifier:
- if 'item_default' in config_map:
- return config_map['item_default'], False
- raise DataNotFoundError("item_name %s is not found in the specfile" % identifier)
-
- def get_module_spec(self):
- return self.specification
-
-class ModuleCCSession(ConfigData):
- def __init__(self, spec_file_name, config_handler, command_handler, cc_session = None):
- module_spec = module_spec_from_file(spec_file_name)
- ConfigData.__init__(self, module_spec)
- self._module_name = module_spec.get_module_name()
- self.set_config_handler(config_handler)
- self.set_command_handler(command_handler)
- if not cc_session:
- self._session = Session(verbose=True)
- else:
- self._session = cc_session
-
- def start(self):
- pass
-
- def close(self):
- self._session.close()
-
- def check_command(self, nonblock=True):
- msg, env = self._session.group_recvmsg(nonblock)
- if not msg or 'result' in msg:
- return
- cmd, arg = parse_command(msg)
- answer = None
- if cmd == COMMAND_CONFIG_UPDATE and self._config_handler:
- answer = self._config_handler(arg)
- elif env['group'] == self._module_name and self._command_handler:
- answer = self._command_handler(cmd, arg)
- if answer:
- self._session.group_reply(env, answer)
-
- def set_config_handler(self, config_handler):
- self._config_handler = config_handler
- # should we run this right now since we've changed the handler?
-
- def set_command_handler(self, command_handler):
- self._command_handler = command_handler
-
- def get_module_spec(self):
- return self.specification
-
- def get_socket(self):
- return self._session._socket
-
diff --git a/src/bin/stats/tests/isc/log/Makefile.am b/src/bin/stats/tests/isc/log/Makefile.am
deleted file mode 100644
index 457b9de..0000000
--- a/src/bin/stats/tests/isc/log/Makefile.am
+++ /dev/null
@@ -1,7 +0,0 @@
-EXTRA_DIST = __init__.py
-CLEANFILES = __init__.pyc
-
-CLEANDIRS = __pycache__
-
-clean-local:
- rm -rf $(CLEANDIRS)
diff --git a/src/bin/stats/tests/isc/log/__init__.py b/src/bin/stats/tests/isc/log/__init__.py
deleted file mode 100644
index 641cf79..0000000
--- a/src/bin/stats/tests/isc/log/__init__.py
+++ /dev/null
@@ -1,33 +0,0 @@
-# Copyright (C) 2011 Internet Systems Consortium.
-#
-# Permission to use, copy, modify, and distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
-# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
-# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
-# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
-# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
-# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
-# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
-# This file is not installed. The log.so is installed into the right place.
-# It is only to find it in the .libs directory when we run as a test or
-# from the build directory.
-# But as nobody gives us the builddir explicitly (and we can't use generation
-# from .in file, as it would put us into the builddir and we wouldn't be found)
-# we guess from current directory. Any idea for something better? This should
-# be enough for the tests, but would it work for B10_FROM_SOURCE as well?
-# Should we look there? Or define something in bind10_config?
-
-import os
-import sys
-
-for base in sys.path[:]:
- loglibdir = os.path.join(base, 'isc/log/.libs')
- if os.path.exists(loglibdir):
- sys.path.insert(0, loglibdir)
-
-from log import *
diff --git a/src/bin/stats/tests/isc/log_messages/Makefile.am b/src/bin/stats/tests/isc/log_messages/Makefile.am
new file mode 100644
index 0000000..90b4499
--- /dev/null
+++ b/src/bin/stats/tests/isc/log_messages/Makefile.am
@@ -0,0 +1,7 @@
+EXTRA_DIST = __init__.py stats_messages.py stats_httpd_messages.py
+CLEANFILES = __init__.pyc stats_messages.pyc stats_httpd_messages.pyc
+
+CLEANDIRS = __pycache__
+
+clean-local:
+ rm -rf $(CLEANDIRS)
diff --git a/src/bin/stats/tests/isc/log_messages/__init__.py b/src/bin/stats/tests/isc/log_messages/__init__.py
new file mode 100644
index 0000000..58e99e3
--- /dev/null
+++ b/src/bin/stats/tests/isc/log_messages/__init__.py
@@ -0,0 +1,18 @@
+# Copyright (C) 2011 Internet Systems Consortium.
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+'''
+This is a fake package that acts as a forwarder to the real package.
+'''
diff --git a/src/bin/stats/tests/isc/log_messages/stats_httpd_messages.py b/src/bin/stats/tests/isc/log_messages/stats_httpd_messages.py
new file mode 100644
index 0000000..0adb0f0
--- /dev/null
+++ b/src/bin/stats/tests/isc/log_messages/stats_httpd_messages.py
@@ -0,0 +1,16 @@
+# Copyright (C) 2011 Internet Systems Consortium.
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+from work.stats_httpd_messages import *
diff --git a/src/bin/stats/tests/isc/log_messages/stats_messages.py b/src/bin/stats/tests/isc/log_messages/stats_messages.py
new file mode 100644
index 0000000..c05a6a8
--- /dev/null
+++ b/src/bin/stats/tests/isc/log_messages/stats_messages.py
@@ -0,0 +1,16 @@
+# Copyright (C) 2011 Internet Systems Consortium.
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+from work.stats_messages import *
diff --git a/src/bin/stats/tests/isc/util/Makefile.am b/src/bin/stats/tests/isc/util/Makefile.am
deleted file mode 100644
index 9c74354..0000000
--- a/src/bin/stats/tests/isc/util/Makefile.am
+++ /dev/null
@@ -1,7 +0,0 @@
-EXTRA_DIST = __init__.py process.py
-CLEANFILES = __init__.pyc process.pyc
-
-CLEANDIRS = __pycache__
-
-clean-local:
- rm -rf $(CLEANDIRS)
diff --git a/src/bin/stats/tests/isc/util/__init__.py b/src/bin/stats/tests/isc/util/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/src/bin/stats/tests/isc/util/process.py b/src/bin/stats/tests/isc/util/process.py
deleted file mode 100644
index 0f764c1..0000000
--- a/src/bin/stats/tests/isc/util/process.py
+++ /dev/null
@@ -1,21 +0,0 @@
-# Copyright (C) 2010 Internet Systems Consortium.
-#
-# Permission to use, copy, modify, and distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
-# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
-# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
-# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
-# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
-# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
-# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
-"""
-A dummy function of isc.util.process.rename()
-"""
-
-def rename(name=None):
- pass
diff --git a/src/bin/stats/tests/test_utils.py b/src/bin/stats/tests/test_utils.py
new file mode 100644
index 0000000..da0bac4
--- /dev/null
+++ b/src/bin/stats/tests/test_utils.py
@@ -0,0 +1,364 @@
+"""
+Utilities and mock modules for unittests of statistics modules
+
+"""
+import os
+import io
+import time
+import sys
+import threading
+import tempfile
+import json
+import signal
+
+import msgq
+import isc.config.cfgmgr
+import stats
+import stats_httpd
+
+# Change value of BIND10_MSGQ_SOCKET_FILE in environment variables
+if 'BIND10_MSGQ_SOCKET_FILE' not in os.environ:
+ os.environ['BIND10_MSGQ_SOCKET_FILE'] = tempfile.mktemp(prefix='msgq_socket_')
+
+class SignalHandler():
+ """A signal handler class for deadlock in unittest"""
+ def __init__(self, fail_handler, timeout=20):
+ """sets a schedule in SIGARM for invoking the handler via
+ unittest.TestCase after timeout seconds (default is 20)"""
+ self.fail_handler = fail_handler
+ self.orig_handler = signal.signal(signal.SIGALRM, self.sig_handler)
+ signal.alarm(timeout)
+
+ def reset(self):
+ """resets the schedule in SIGALRM"""
+ signal.alarm(0)
+ signal.signal(signal.SIGALRM, self.orig_handler)
+
+ def sig_handler(self, signal, frame):
+ """envokes unittest.TestCase.fail as a signal handler"""
+ self.fail_handler("A deadlock might be detected")
+
+def send_command(command_name, module_name, params=None, session=None, nonblock=False, timeout=None):
+ if session is not None:
+ cc_session = session
+ else:
+ cc_session = isc.cc.Session()
+ if timeout is not None:
+ orig_timeout = cc_session.get_timeout()
+ cc_session.set_timeout(timeout * 1000)
+ command = isc.config.ccsession.create_command(command_name, params)
+ seq = cc_session.group_sendmsg(command, module_name)
+ try:
+ (answer, env) = cc_session.group_recvmsg(nonblock, seq)
+ if answer:
+ return isc.config.ccsession.parse_answer(answer)
+ except isc.cc.SessionTimeout:
+ pass
+ finally:
+ if timeout is not None:
+ cc_session.set_timeout(orig_timeout)
+ if session is None:
+ cc_session.close()
+
+def send_shutdown(module_name, **kwargs):
+ return send_command("shutdown", module_name, **kwargs)
+
+class ThreadingServerManager:
+ def __init__(self, server, *args, **kwargs):
+ self.server = server(*args, **kwargs)
+ self.server_name = server.__name__
+ self.server._thread = threading.Thread(
+ name=self.server_name, target=self.server.run)
+ self.server._thread.daemon = True
+
+ def run(self):
+ self.server._thread.start()
+ self.server._started.wait()
+ self.server._started.clear()
+
+ def shutdown(self):
+ self.server.shutdown()
+ self.server._thread.join(0) # timeout is 0
+
+def do_nothing(*args, **kwargs): pass
+
+class dummy_sys:
+ """Dummy for sys"""
+ class dummy_io:
+ write = do_nothing
+ stdout = stderr = dummy_io()
+
+class MockMsgq:
+ def __init__(self):
+ self._started = threading.Event()
+ # suppress output to stdout and stderr
+ msgq.sys = dummy_sys()
+ msgq.print = do_nothing
+ self.msgq = msgq.MsgQ(verbose=False)
+ result = self.msgq.setup()
+ if result:
+ sys.exit("Error on Msgq startup: %s" % result)
+
+ def run(self):
+ self._started.set()
+ try:
+ self.msgq.run()
+ except Exception:
+ pass
+ finally:
+ # explicitly shut down the socket of the msgq before
+ # shutting down the msgq
+ self.msgq.listen_socket.shutdown(msgq.socket.SHUT_RDWR)
+ self.msgq.shutdown()
+
+ def shutdown(self):
+ # do nothing for avoiding shutting down the msgq twice
+ pass
+
+class MockCfgmgr:
+ def __init__(self):
+ self._started = threading.Event()
+ self.cfgmgr = isc.config.cfgmgr.ConfigManager(
+ os.environ['CONFIG_TESTDATA_PATH'], "b10-config.db")
+ self.cfgmgr.read_config()
+
+ def run(self):
+ self._started.set()
+ try:
+ self.cfgmgr.run()
+ except Exception:
+ pass
+
+ def shutdown(self):
+ self.cfgmgr.running = False
+
+class MockBoss:
+ spec_str = """\
+{
+ "module_spec": {
+ "module_name": "Boss",
+ "module_description": "Mock Master process",
+ "config_data": [],
+ "commands": [
+ {
+ "command_name": "sendstats",
+ "command_description": "Send data to a statistics module at once",
+ "command_args": []
+ }
+ ],
+ "statistics": [
+ {
+ "item_name": "boot_time",
+ "item_type": "string",
+ "item_optional": false,
+ "item_default": "1970-01-01T00:00:00Z",
+ "item_title": "Boot time",
+ "item_description": "A date time when bind10 process starts initially",
+ "item_format": "date-time"
+ }
+ ]
+ }
+}
+"""
+ _BASETIME = (2011, 6, 22, 8, 14, 8, 2, 173, 0)
+
+ def __init__(self):
+ self._started = threading.Event()
+ self.running = False
+ self.spec_file = io.StringIO(self.spec_str)
+ # create ModuleCCSession object
+ self.mccs = isc.config.ModuleCCSession(
+ self.spec_file,
+ self.config_handler,
+ self.command_handler)
+ self.spec_file.close()
+ self.cc_session = self.mccs._session
+ self.got_command_name = ''
+
+ def run(self):
+ self.mccs.start()
+ self.running = True
+ self._started.set()
+ try:
+ while self.running:
+ self.mccs.check_command(False)
+ except Exception:
+ pass
+
+ def shutdown(self):
+ self.running = False
+
+ def config_handler(self, new_config):
+ return isc.config.create_answer(0)
+
+ def command_handler(self, command, *args, **kwargs):
+ self._started.set()
+ self.got_command_name = command
+ params = { "owner": "Boss",
+ "data": {
+ 'boot_time': time.strftime('%Y-%m-%dT%H:%M:%SZ', self._BASETIME)
+ }
+ }
+ if command == 'sendstats':
+ send_command("set", "Stats", params=params, session=self.cc_session)
+ return isc.config.create_answer(0)
+ elif command == 'getstats':
+ return isc.config.create_answer(0, params)
+ return isc.config.create_answer(1, "Unknown Command")
+
+class MockAuth:
+ spec_str = """\
+{
+ "module_spec": {
+ "module_name": "Auth",
+ "module_description": "Mock Authoritative service",
+ "config_data": [],
+ "commands": [
+ {
+ "command_name": "sendstats",
+ "command_description": "Send data to a statistics module at once",
+ "command_args": []
+ }
+ ],
+ "statistics": [
+ {
+ "item_name": "queries.tcp",
+ "item_type": "integer",
+ "item_optional": false,
+ "item_default": 0,
+ "item_title": "Queries TCP",
+ "item_description": "A number of total query counts which all auth servers receive over TCP since they started initially"
+ },
+ {
+ "item_name": "queries.udp",
+ "item_type": "integer",
+ "item_optional": false,
+ "item_default": 0,
+ "item_title": "Queries UDP",
+ "item_description": "A number of total query counts which all auth servers receive over UDP since they started initially"
+ }
+ ]
+ }
+}
+"""
+ def __init__(self):
+ self._started = threading.Event()
+ self.running = False
+ self.spec_file = io.StringIO(self.spec_str)
+ # create ModuleCCSession object
+ self.mccs = isc.config.ModuleCCSession(
+ self.spec_file,
+ self.config_handler,
+ self.command_handler)
+ self.spec_file.close()
+ self.cc_session = self.mccs._session
+ self.got_command_name = ''
+ self.queries_tcp = 3
+ self.queries_udp = 2
+
+ def run(self):
+ self.mccs.start()
+ self.running = True
+ self._started.set()
+ try:
+ while self.running:
+ self.mccs.check_command(False)
+ except Exception:
+ pass
+
+ def shutdown(self):
+ self.running = False
+
+ def config_handler(self, new_config):
+ return isc.config.create_answer(0)
+
+ def command_handler(self, command, *args, **kwargs):
+ self.got_command_name = command
+ if command == 'sendstats':
+ params = { "owner": "Auth",
+ "data": { 'queries.tcp': self.queries_tcp,
+ 'queries.udp': self.queries_udp } }
+ return send_command("set", "Stats", params=params, session=self.cc_session)
+ return isc.config.create_answer(1, "Unknown Command")
+
+class MyStats(stats.Stats):
+ def __init__(self):
+ self._started = threading.Event()
+ stats.Stats.__init__(self)
+
+ def run(self):
+ self._started.set()
+ try:
+ self.start()
+ except Exception:
+ pass
+
+ def shutdown(self):
+ self.command_shutdown()
+
+class MyStatsHttpd(stats_httpd.StatsHttpd):
+ ORIG_SPECFILE_LOCATION = stats_httpd.SPECFILE_LOCATION
+ def __init__(self, *server_address):
+ self._started = threading.Event()
+ if server_address:
+ stats_httpd.SPECFILE_LOCATION = self.create_specfile(*server_address)
+ try:
+ stats_httpd.StatsHttpd.__init__(self)
+ finally:
+ if hasattr(stats_httpd.SPECFILE_LOCATION, "close"):
+ stats_httpd.SPECFILE_LOCATION.close()
+ stats_httpd.SPECFILE_LOCATION = self.ORIG_SPECFILE_LOCATION
+ else:
+ stats_httpd.StatsHttpd.__init__(self)
+
+ def create_specfile(self, *server_address):
+ spec_io = open(self.ORIG_SPECFILE_LOCATION)
+ try:
+ spec = json.load(spec_io)
+ spec_io.close()
+ config = spec['module_spec']['config_data']
+ for i in range(len(config)):
+ if config[i]['item_name'] == 'listen_on':
+ config[i]['item_default'] = \
+ [ dict(address=a[0], port=a[1]) for a in server_address ]
+ break
+ return io.StringIO(json.dumps(spec))
+ finally:
+ spec_io.close()
+
+ def run(self):
+ self._started.set()
+ try:
+ self.start()
+ except Exception:
+ pass
+
+ def shutdown(self):
+ self.command_handler('shutdown', None)
+
+class BaseModules:
+ def __init__(self):
+ # MockMsgq
+ self.msgq = ThreadingServerManager(MockMsgq)
+ self.msgq.run()
+ # Check whether msgq is ready. A SessionTimeout is raised here if not.
+ isc.cc.session.Session().close()
+ # MockCfgmgr
+ self.cfgmgr = ThreadingServerManager(MockCfgmgr)
+ self.cfgmgr.run()
+ # MockBoss
+ self.boss = ThreadingServerManager(MockBoss)
+ self.boss.run()
+ # MockAuth
+ self.auth = ThreadingServerManager(MockAuth)
+ self.auth.run()
+
+ def shutdown(self):
+ # MockAuth
+ self.auth.shutdown()
+ # MockBoss
+ self.boss.shutdown()
+ # MockCfgmgr
+ self.cfgmgr.shutdown()
+ # MockMsgq
+ self.msgq.shutdown()
diff --git a/src/bin/stats/tests/testdata/Makefile.am b/src/bin/stats/tests/testdata/Makefile.am
deleted file mode 100644
index 1b8df6d..0000000
--- a/src/bin/stats/tests/testdata/Makefile.am
+++ /dev/null
@@ -1 +0,0 @@
-EXTRA_DIST = stats_test.spec
diff --git a/src/bin/stats/tests/testdata/stats_test.spec b/src/bin/stats/tests/testdata/stats_test.spec
deleted file mode 100644
index 8136756..0000000
--- a/src/bin/stats/tests/testdata/stats_test.spec
+++ /dev/null
@@ -1,19 +0,0 @@
-{
- "module_spec": {
- "module_name": "Stats",
- "module_description": "Stats daemon",
- "config_data": [],
- "commands": [
- {
- "command_name": "status",
- "command_description": "identify whether stats module is alive or not",
- "command_args": []
- },
- {
- "command_name": "the_dummy",
- "command_description": "this is for testing",
- "command_args": []
- }
- ]
- }
-}
diff --git a/src/bin/tests/Makefile.am b/src/bin/tests/Makefile.am
index 56ff68b..41b497f 100644
--- a/src/bin/tests/Makefile.am
+++ b/src/bin/tests/Makefile.am
@@ -8,19 +8,20 @@ noinst_SCRIPTS = $(PYTESTS)
# required by loadable python modules.
LIBRARY_PATH_PLACEHOLDER =
if SET_ENV_LIBRARY_PATH
-LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$$$(ENV_LIBRARY_PATH)
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/util/io/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
endif
# test using command-line arguments, so use check-local target instead of TESTS
check-local:
if ENABLE_PYTHON_COVERAGE
- touch $(abs_top_srcdir)/.coverage
+ touch $(abs_top_srcdir)/.coverage
rm -f .coverage
${LN_S} $(abs_top_srcdir)/.coverage .coverage
endif
for pytest in $(PYTESTS) ; do \
echo Running test: $$pytest ; \
+ chmod +x $(abs_builddir)/$$pytest ; \
$(LIBRARY_PATH_PLACEHOLDER) \
- env PYTHONPATH=$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python:$(abs_top_builddir)/src/lib/dns/python/.libs \
+ PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_builddir)/src/lib/dns/python/.libs \
$(PYCOVERAGE_RUN) $(abs_builddir)/$$pytest || exit ; \
done
diff --git a/src/bin/xfrin/Makefile.am b/src/bin/xfrin/Makefile.am
index 0af9be6..8d80b22 100644
--- a/src/bin/xfrin/Makefile.am
+++ b/src/bin/xfrin/Makefile.am
@@ -6,9 +6,13 @@ pkglibexec_SCRIPTS = b10-xfrin
b10_xfrindir = $(pkgdatadir)
b10_xfrin_DATA = xfrin.spec
-pyexec_DATA = xfrin_messages.py
-CLEANFILES = b10-xfrin xfrin.pyc xfrinlog.py xfrin_messages.py xfrin_messages.pyc
+nodist_pylogmessage_PYTHON = $(PYTHON_LOGMSGPKG_DIR)/work/xfrin_messages.py
+pylogmessagedir = $(pyexecdir)/isc/log_messages/
+
+CLEANFILES = b10-xfrin xfrin.pyc xfrinlog.py
+CLEANFILES += $(PYTHON_LOGMSGPKG_DIR)/work/xfrin_messages.py
+CLEANFILES += $(PYTHON_LOGMSGPKG_DIR)/work/xfrin_messages.pyc
man_MANS = b10-xfrin.8
EXTRA_DIST = $(man_MANS) b10-xfrin.xml
@@ -22,11 +26,12 @@ b10-xfrin.8: b10-xfrin.xml
endif
# Define rule to build logging source files from message file
-xfrin_messages.py: xfrin_messages.mes
- $(top_builddir)/src/lib/log/compiler/message -p $(top_srcdir)/src/bin/xfrin/xfrin_messages.mes
+$(PYTHON_LOGMSGPKG_DIR)/work/xfrin_messages.py : xfrin_messages.mes
+ $(top_builddir)/src/lib/log/compiler/message \
+ -d $(PYTHON_LOGMSGPKG_DIR)/work -p $(srcdir)/xfrin_messages.mes
# this is done here since configure.ac AC_OUTPUT doesn't expand exec_prefix
-b10-xfrin: xfrin.py xfrin_messages.py
+b10-xfrin: xfrin.py $(PYTHON_LOGMSGPKG_DIR)/work/xfrin_messages.py
$(SED) -e "s|@@PYTHONPATH@@|@pyexecdir@|" \
-e "s|@@LOCALSTATEDIR@@|$(localstatedir)|" xfrin.py >$@
chmod a+x $@
diff --git a/src/bin/xfrin/b10-xfrin.8 b/src/bin/xfrin/b10-xfrin.8
index 3ea2293..54dbe7c 100644
--- a/src/bin/xfrin/b10-xfrin.8
+++ b/src/bin/xfrin/b10-xfrin.8
@@ -2,12 +2,12 @@
.\" Title: b10-xfrin
.\" Author: [FIXME: author] [see http://docbook.sf.net/el/author]
.\" Generator: DocBook XSL Stylesheets v1.75.2 <http://docbook.sf.net/>
-.\" Date: May 19, 2011
+.\" Date: September 8, 2011
.\" Manual: BIND10
.\" Source: BIND10
.\" Language: English
.\"
-.TH "B10\-XFRIN" "8" "May 19, 2011" "BIND10" "BIND10"
+.TH "B10\-XFRIN" "8" "September 8, 2011" "BIND10" "BIND10"
.\" -----------------------------------------------------------------
.\" * set default formatting
.\" -----------------------------------------------------------------
@@ -61,7 +61,7 @@ receives its configurations from
.PP
The configurable settings are:
.PP
-\fItransfers\-in\fR
+\fItransfers_in\fR
defines the maximum number of inbound zone transfers that can run concurrently\&. The default is 10\&.
.PP
@@ -71,6 +71,9 @@ is a list of zones known to the
daemon\&. The list items are:
\fIname\fR
(the zone name),
+\fIclass\fR
+(defaults to
+\(lqIN\(rq),
\fImaster_addr\fR
(the zone master to transfer from),
\fImaster_port\fR
@@ -125,7 +128,7 @@ to define the class (defaults to
\fImaster\fR
to define the IP address of the authoritative server to transfer from, and
\fIport\fR
-to define the port number on the authoritative server (defaults to 53)\&. If the address or port is not specified, it will use the values previously defined in the
+to define the port number on the authoritative server (defaults to 53)\&. If the address or port is not specified, it will use the value previously defined in the
\fIzones\fR
configuration\&.
.PP
diff --git a/src/bin/xfrin/b10-xfrin.xml b/src/bin/xfrin/b10-xfrin.xml
index ea4c724..d45e15f 100644
--- a/src/bin/xfrin/b10-xfrin.xml
+++ b/src/bin/xfrin/b10-xfrin.xml
@@ -20,7 +20,7 @@
<refentry>
<refentryinfo>
- <date>May 19, 2011</date>
+ <date>September 8, 2011</date>
</refentryinfo>
<refmeta>
@@ -92,7 +92,7 @@ in separate zonemgr process.
The configurable settings are:
</para>
- <para><varname>transfers-in</varname>
+ <para><varname>transfers_in</varname>
defines the maximum number of inbound zone transfers
that can run concurrently. The default is 10.
</para>
@@ -103,6 +103,7 @@ in separate zonemgr process.
<command>b10-xfrin</command> daemon.
The list items are:
<varname>name</varname> (the zone name),
+ <varname>class</varname> (defaults to <quote>IN</quote>),
<varname>master_addr</varname> (the zone master to transfer from),
<varname>master_port</varname> (defaults to 53), and
<varname>tsig_key</varname> (optional TSIG key to use).
@@ -168,7 +169,7 @@ in separate zonemgr process.
and <varname>port</varname> to define the port number on the
authoritative server (defaults to 53).
If the address or port is not specified, it will use the
- values previously defined in the <varname>zones</varname>
+ value previously defined in the <varname>zones</varname>
configuration.
</para>
<!-- TODO: later hostname for master? -->
diff --git a/src/bin/xfrin/tests/Makefile.am b/src/bin/xfrin/tests/Makefile.am
index 0f485aa..ae6620c 100644
--- a/src/bin/xfrin/tests/Makefile.am
+++ b/src/bin/xfrin/tests/Makefile.am
@@ -1,3 +1,5 @@
+SUBDIRS = testdata .
+
PYCOVERAGE_RUN=@PYCOVERAGE_RUN@
PYTESTS = xfrin_test.py
EXTRA_DIST = $(PYTESTS)
@@ -6,7 +8,7 @@ EXTRA_DIST = $(PYTESTS)
# required by loadable python modules.
LIBRARY_PATH_PLACEHOLDER =
if SET_ENV_LIBRARY_PATH
-LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/xfr/.libs:$$$(ENV_LIBRARY_PATH)
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/util/io/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
endif
# test using command-line arguments, so use check-local target instead of TESTS
@@ -19,6 +21,8 @@ endif
for pytest in $(PYTESTS) ; do \
echo Running test: $$pytest ; \
$(LIBRARY_PATH_PLACEHOLDER) \
- env PYTHONPATH=$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/bin/xfrin:$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python \
+ PYTHONPATH=$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/bin/xfrin:$(COMMON_PYTHON_PATH) \
+ TESTDATASRCDIR=$(abs_top_srcdir)/src/bin/xfrin/tests/testdata/ \
+ TESTDATAOBJDIR=$(abs_top_builddir)/src/bin/xfrin/tests/testdata/ \
$(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
done
diff --git a/src/bin/xfrin/tests/testdata/Makefile.am b/src/bin/xfrin/tests/testdata/Makefile.am
new file mode 100644
index 0000000..5e325cb
--- /dev/null
+++ b/src/bin/xfrin/tests/testdata/Makefile.am
@@ -0,0 +1,2 @@
+EXTRA_DIST = example.com # not necessarily needed, but for reference
+EXTRA_DIST += example.com.sqlite3
diff --git a/src/bin/xfrin/tests/testdata/example.com b/src/bin/xfrin/tests/testdata/example.com
new file mode 100644
index 0000000..2afcd28
--- /dev/null
+++ b/src/bin/xfrin/tests/testdata/example.com
@@ -0,0 +1,17 @@
+;; This is the simplest form of a zone file for 'example.com', which is the
+;; source of the corresponding sqlite3 DB file. This file is provided
+;; for reference purposes only; it's not actually used anywhere.
+
+example.com. 3600 IN SOA master.example.com. admin.example.com. (
+ 1230 ; serial
+ 3600 ; refresh (1 hour)
+ 1800 ; retry (30 minutes)
+ 2419200 ; expire (4 weeks)
+ 7200 ; minimum (2 hours)
+ )
+ 3600 NS dns01.example.com.
+ 3600 NS dns02.example.com.
+ 3600 NS dns03.example.com.
+dns01.example.com. 3600 IN A 192.0.2.1
+dns02.example.com. 3600 IN A 192.0.2.2
+dns03.example.com. 3600 IN A 192.0.2.3
diff --git a/src/bin/xfrin/tests/testdata/example.com.sqlite3 b/src/bin/xfrin/tests/testdata/example.com.sqlite3
new file mode 100644
index 0000000..ed241c3
Binary files /dev/null and b/src/bin/xfrin/tests/testdata/example.com.sqlite3 differ
diff --git a/src/bin/xfrin/tests/xfrin_test.py b/src/bin/xfrin/tests/xfrin_test.py
index 92bf1b0..68a3124 100644
--- a/src/bin/xfrin/tests/xfrin_test.py
+++ b/src/bin/xfrin/tests/xfrin_test.py
@@ -14,10 +14,12 @@
# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import unittest
+import shutil
import socket
import io
from isc.testutils.tsigctx_mock import MockTSIGContext
from xfrin import *
+from isc.xfrin.diff import Diff
import isc.log
#
@@ -36,25 +38,58 @@ TEST_MASTER_IPV6_ADDRESS = '::1'
TEST_MASTER_IPV6_ADDRINFO = (socket.AF_INET6, socket.SOCK_STREAM,
socket.IPPROTO_TCP, '',
(TEST_MASTER_IPV6_ADDRESS, 53))
+
+TESTDATA_SRCDIR = os.getenv("TESTDATASRCDIR")
+TESTDATA_OBJDIR = os.getenv("TESTDATAOBJDIR")
# XXX: This should be a non-privileged port that is unlikely to be used.
# If some other process uses this port test will fail.
TEST_MASTER_PORT = '53535'
TSIG_KEY = TSIGKey("example.com:SFuWd/q99SzF8Yzd1QbB9g==")
+# SOA intended to be used for the new SOA as a result of transfer.
soa_rdata = Rdata(RRType.SOA(), TEST_RRCLASS,
'master.example.com. admin.example.com ' +
'1234 3600 1800 2419200 7200')
-soa_rrset = RRset(TEST_ZONE_NAME, TEST_RRCLASS, RRType.SOA(),
- RRTTL(3600))
+soa_rrset = RRset(TEST_ZONE_NAME, TEST_RRCLASS, RRType.SOA(), RRTTL(3600))
soa_rrset.add_rdata(soa_rdata)
-example_axfr_question = Question(TEST_ZONE_NAME, TEST_RRCLASS,
- RRType.AXFR())
-example_soa_question = Question(TEST_ZONE_NAME, TEST_RRCLASS,
- RRType.SOA())
+
+# SOA intended to be used for the current SOA at the secondary side.
+# Note that its serial is smaller than that of soa_rdata.
+begin_soa_rdata = Rdata(RRType.SOA(), TEST_RRCLASS,
+ 'master.example.com. admin.example.com ' +
+ '1230 3600 1800 2419200 7200')
+begin_soa_rrset = RRset(TEST_ZONE_NAME, TEST_RRCLASS, RRType.SOA(), RRTTL(3600))
+begin_soa_rrset.add_rdata(begin_soa_rdata)
+example_axfr_question = Question(TEST_ZONE_NAME, TEST_RRCLASS, RRType.AXFR())
+example_soa_question = Question(TEST_ZONE_NAME, TEST_RRCLASS, RRType.SOA())
default_questions = [example_axfr_question]
default_answers = [soa_rrset]
+def check_diffs(assert_fn, expected, actual):
+ '''A helper function checking the differences made in the XFR session.
+
+ This is expected to be called from some subclass of unittest.TestCase and
+ assert_fn is generally expected to be 'self.assertEqual' of that class.
+
+ '''
+ assert_fn(len(expected), len(actual))
+ for (diffs_exp, diffs_actual) in zip(expected, actual):
+ assert_fn(len(diffs_exp), len(diffs_actual))
+ for (diff_exp, diff_actual) in zip(diffs_exp, diffs_actual):
+ # operation should match
+ assert_fn(diff_exp[0], diff_actual[0])
+ # The diff as RRset should be equal (for simplicity we assume
+ # all RRsets contain exactly one RDATA)
+ assert_fn(diff_exp[1].get_name(), diff_actual[1].get_name())
+ assert_fn(diff_exp[1].get_type(), diff_actual[1].get_type())
+ assert_fn(diff_exp[1].get_class(), diff_actual[1].get_class())
+ assert_fn(diff_exp[1].get_rdata_count(),
+ diff_actual[1].get_rdata_count())
+ assert_fn(1, diff_exp[1].get_rdata_count())
+ assert_fn(diff_exp[1].get_rdata()[0],
+ diff_actual[1].get_rdata()[0])
+
class XfrinTestException(Exception):
pass
@@ -65,6 +100,78 @@ class MockCC():
if identifier == "zones/class":
return TEST_RRCLASS_STR
+class MockDataSourceClient():
+ '''A simple mock data source client.
+
+ This class provides a minimal set of wrappers related the data source
+ API that would be used by Diff objects. For our testing purposes they
+ only keep track of the history of the changes.
+
+ '''
+ def __init__(self):
+ self.committed_diffs = []
+ self.diffs = []
+
+ def get_class(self):
+ '''Mock version of get_class().
+
+ We simply return the commonly used constant RR class. If and when
+ we use this mock for a different RR class we need to adjust it
+ accordingly.
+
+ '''
+ return TEST_RRCLASS
+
+ def find_zone(self, zone_name):
+ '''Mock version of find_zone().
+
+ It returns itself (subsequently acting as a mock ZoneFinder) for
+ some test zone names. For some others it returns either NOTFOUND
+ or PARTIALMATCH.
+
+ '''
+ if zone_name == TEST_ZONE_NAME or \
+ zone_name == Name('no-soa.example') or \
+ zone_name == Name('dup-soa.example'):
+ return (isc.datasrc.DataSourceClient.SUCCESS, self)
+ elif zone_name == Name('no-such-zone.example'):
+ return (DataSourceClient.NOTFOUND, None)
+ elif zone_name == Name('partial-match-zone.example'):
+ return (DataSourceClient.PARTIALMATCH, self)
+ raise ValueError('Unexpected input to mock client: bug in test case?')
+
+ def find(self, name, rrtype, target, options):
+ '''Mock ZoneFinder.find().
+
+ It returns the predefined SOA RRset to queries for SOA of the common
+ test zone name. It also emulates some unusual cases for special
+ zone names.
+
+ '''
+ if name == TEST_ZONE_NAME and rrtype == RRType.SOA():
+ return (ZoneFinder.SUCCESS, begin_soa_rrset)
+ if name == Name('no-soa.example'):
+ return (ZoneFinder.NXDOMAIN, None)
+ if name == Name('dup-soa.example'):
+ dup_soa_rrset = RRset(name, TEST_RRCLASS, RRType.SOA(), RRTTL(0))
+ dup_soa_rrset.add_rdata(begin_soa_rdata)
+ dup_soa_rrset.add_rdata(soa_rdata)
+ return (ZoneFinder.SUCCESS, dup_soa_rrset)
+ raise ValueError('Unexpected input to mock finder: bug in test case?')
+
+ def get_updater(self, zone_name, replace):
+ return self
+
+ def add_rrset(self, rrset):
+ self.diffs.append(('add', rrset))
+
+ def delete_rrset(self, rrset):
+ self.diffs.append(('delete', rrset))
+
+ def commit(self):
+ self.committed_diffs.append(self.diffs)
+ self.diffs = []
+
class MockXfrin(Xfrin):
# This is a class attribute of a callable object that specifies a non
# default behavior triggered in _cc_check_command(). Specific test methods
@@ -87,20 +194,21 @@ class MockXfrin(Xfrin):
MockXfrin.check_command_hook()
def xfrin_start(self, zone_name, rrclass, db_file, master_addrinfo,
- tsig_key, check_soa=True):
+ tsig_key, request_type, check_soa=True):
# store some of the arguments for verification, then call this
# method in the superclass
self.xfrin_started_master_addr = master_addrinfo[2][0]
self.xfrin_started_master_port = master_addrinfo[2][1]
- return Xfrin.xfrin_start(self, zone_name, rrclass, db_file,
+ self.xfrin_started_request_type = request_type
+ return Xfrin.xfrin_start(self, zone_name, rrclass, None,
master_addrinfo, tsig_key,
- check_soa)
+ request_type, check_soa)
class MockXfrinConnection(XfrinConnection):
def __init__(self, sock_map, zone_name, rrclass, db_file, shutdown_event,
master_addr):
- super().__init__(sock_map, zone_name, rrclass, db_file, shutdown_event,
- master_addr)
+ super().__init__(sock_map, zone_name, rrclass, MockDataSourceClient(),
+ db_file, shutdown_event, master_addr)
self.query_data = b''
self.reply_data = b''
self.force_time_out = False
@@ -122,7 +230,8 @@ class MockXfrinConnection(XfrinConnection):
data = self.reply_data[:size]
self.reply_data = self.reply_data[size:]
if len(data) < size:
- raise XfrinTestException('cannot get reply data')
+ raise XfrinTestException('cannot get reply data (' + str(size) +
+ ' bytes)')
return data
def send(self, data):
@@ -174,12 +283,288 @@ class MockXfrinConnection(XfrinConnection):
return reply_data
+class TestXfrinState(unittest.TestCase):
+ def setUp(self):
+ self.sock_map = {}
+ self.conn = MockXfrinConnection(self.sock_map, TEST_ZONE_NAME,
+ TEST_RRCLASS, TEST_DB_FILE,
+ threading.Event(),
+ TEST_MASTER_IPV4_ADDRINFO)
+ self.begin_soa = RRset(TEST_ZONE_NAME, TEST_RRCLASS, RRType.SOA(),
+ RRTTL(3600))
+ self.begin_soa.add_rdata(Rdata(RRType.SOA(), TEST_RRCLASS,
+ 'm. r. 1230 0 0 0 0'))
+ self.ns_rrset = RRset(TEST_ZONE_NAME, TEST_RRCLASS, RRType.NS(),
+ RRTTL(3600))
+ self.ns_rrset.add_rdata(Rdata(RRType.NS(), TEST_RRCLASS,
+ 'ns.example.com'))
+ self.a_rrset = RRset(TEST_ZONE_NAME, TEST_RRCLASS, RRType.A(),
+ RRTTL(3600))
+ self.a_rrset.add_rdata(Rdata(RRType.A(), TEST_RRCLASS, '192.0.2.1'))
+
+ self.conn._datasrc_client = MockDataSourceClient()
+ self.conn._diff = Diff(self.conn._datasrc_client, TEST_ZONE_NAME)
+
+class TestXfrinStateBase(TestXfrinState):
+ def setUp(self):
+ super().setUp()
+
+ def test_handle_rr_on_base(self):
+ # The base version of handle_rr() isn't supposed to be called
+ # directly (the argument doesn't matter in this test)
+ self.assertRaises(XfrinException, XfrinState().handle_rr, None)
+
+class TestXfrinInitialSOA(TestXfrinState):
+ def setUp(self):
+ super().setUp()
+ self.state = XfrinInitialSOA()
+
+ def test_handle_rr(self):
+ # normal case
+ self.assertTrue(self.state.handle_rr(self.conn, soa_rrset))
+ self.assertEqual(type(XfrinFirstData()),
+ type(self.conn.get_xfrstate()))
+ self.assertEqual(1234, self.conn._end_serial)
+
+ def test_handle_not_soa(self):
+ # The given RR is not of SOA
+ self.assertRaises(XfrinProtocolError, self.state.handle_rr, self.conn,
+ self.ns_rrset)
+
+ def test_finish_message(self):
+ self.assertTrue(self.state.finish_message(self.conn))
+
+class TestXfrinFirstData(TestXfrinState):
+ def setUp(self):
+ super().setUp()
+ self.state = XfrinFirstData()
+ self.conn._request_type = RRType.IXFR()
+ self.conn._request_serial = 1230 # arbitrary chosen serial < 1234
+ self.conn._diff = None # should be replaced in the AXFR case
+
+ def test_handle_ixfr_begin_soa(self):
+ self.conn._request_type = RRType.IXFR()
+ self.assertFalse(self.state.handle_rr(self.conn, self.begin_soa))
+ self.assertEqual(type(XfrinIXFRDeleteSOA()),
+ type(self.conn.get_xfrstate()))
+
+ def test_handle_axfr(self):
+ # If the original type is AXFR, other conditions aren't considered,
+ # and AXFR processing will continue
+ self.conn._request_type = RRType.AXFR()
+ self.assertFalse(self.state.handle_rr(self.conn, self.begin_soa))
+ self.assertEqual(type(XfrinAXFR()), type(self.conn.get_xfrstate()))
+
+ def test_handle_ixfr_to_axfr(self):
+ # Detecting AXFR-compatible IXFR response by seeing a non SOA RR after
+ # the initial SOA. Should switch to AXFR.
+ self.assertFalse(self.state.handle_rr(self.conn, self.ns_rrset))
+ self.assertEqual(type(XfrinAXFR()), type(self.conn.get_xfrstate()))
+ # The Diff for AXFR should be created at this point
+ self.assertNotEqual(None, self.conn._diff)
+
+ def test_handle_ixfr_to_axfr_by_different_soa(self):
+ # An unusual case: Response contains two consecutive SOA but the
+ # serial of the second does not match the requested one. See
+ # the documentation for XfrinFirstData.handle_rr().
+ self.assertFalse(self.state.handle_rr(self.conn, soa_rrset))
+ self.assertEqual(type(XfrinAXFR()), type(self.conn.get_xfrstate()))
+ self.assertNotEqual(None, self.conn._diff)
+
+ def test_finish_message(self):
+ self.assertTrue(self.state.finish_message(self.conn))
+
+class TestXfrinIXFRDeleteSOA(TestXfrinState):
+ def setUp(self):
+ super().setUp()
+ self.state = XfrinIXFRDeleteSOA()
+ # In this state a new Diff object is expected to be created. To
+ # confirm it, we nullify it beforehand.
+ self.conn._diff = None
+
+ def test_handle_rr(self):
+ self.assertTrue(self.state.handle_rr(self.conn, self.begin_soa))
+ self.assertEqual(type(XfrinIXFRDelete()),
+ type(self.conn.get_xfrstate()))
+ self.assertEqual([('delete', self.begin_soa)],
+ self.conn._diff.get_buffer())
+
+ def test_handle_non_soa(self):
+ self.assertRaises(XfrinException, self.state.handle_rr, self.conn,
+ self.ns_rrset)
+
+ def test_finish_message(self):
+ self.assertTrue(self.state.finish_message(self.conn))
+
+class TestXfrinIXFRDelete(TestXfrinState):
+ def setUp(self):
+ super().setUp()
+ # We need to record the state in 'conn' to check the case where the
+ # state doesn't change.
+ XfrinIXFRDelete().set_xfrstate(self.conn, XfrinIXFRDelete())
+ self.state = self.conn.get_xfrstate()
+
+ def test_handle_delete_rr(self):
+ # Non SOA RRs are simply (going to be) deleted in this state
+ self.assertTrue(self.state.handle_rr(self.conn, self.ns_rrset))
+ self.assertEqual([('delete', self.ns_rrset)],
+ self.conn._diff.get_buffer())
+ # The state shouldn't change
+ self.assertEqual(type(XfrinIXFRDelete()),
+ type(self.conn.get_xfrstate()))
+
+ def test_handle_soa(self):
+ # SOA in this state means the beginning of added RRs. This SOA
+ # should also be added in the next state, so handle_rr() should return
+ # false.
+ self.assertFalse(self.state.handle_rr(self.conn, soa_rrset))
+ self.assertEqual([], self.conn._diff.get_buffer())
+ self.assertEqual(1234, self.conn._current_serial)
+ self.assertEqual(type(XfrinIXFRAddSOA()),
+ type(self.conn.get_xfrstate()))
+
+ def test_finish_message(self):
+ self.assertTrue(self.state.finish_message(self.conn))
+
+class TestXfrinIXFRAddSOA(TestXfrinState):
+ def setUp(self):
+ super().setUp()
+ self.state = XfrinIXFRAddSOA()
+
+ def test_handle_rr(self):
+ self.assertTrue(self.state.handle_rr(self.conn, soa_rrset))
+ self.assertEqual(type(XfrinIXFRAdd()), type(self.conn.get_xfrstate()))
+ self.assertEqual([('add', soa_rrset)],
+ self.conn._diff.get_buffer())
+
+ def test_handle_non_soa(self):
+ self.assertRaises(XfrinException, self.state.handle_rr, self.conn,
+ self.ns_rrset)
+
+ def test_finish_message(self):
+ self.assertTrue(self.state.finish_message(self.conn))
+
+class TestXfrinIXFRAdd(TestXfrinState):
+ def setUp(self):
+ super().setUp()
+ # We need to record the state in 'conn' to check the case where the
+ # state doesn't change.
+ XfrinIXFRAdd().set_xfrstate(self.conn, XfrinIXFRAdd())
+ self.conn._current_serial = 1230
+ self.state = self.conn.get_xfrstate()
+
+ def test_handle_add_rr(self):
+ # Non SOA RRs are simply (going to be) added in this state
+ self.assertTrue(self.state.handle_rr(self.conn, self.ns_rrset))
+ self.assertEqual([('add', self.ns_rrset)],
+ self.conn._diff.get_buffer())
+ # The state shouldn't change
+ self.assertEqual(type(XfrinIXFRAdd()), type(self.conn.get_xfrstate()))
+
+ def test_handle_end_soa(self):
+ self.conn._end_serial = 1234
+ self.conn._diff.add_data(self.ns_rrset) # put some dummy change
+ self.assertTrue(self.state.handle_rr(self.conn, soa_rrset))
+ self.assertEqual(type(XfrinIXFREnd()), type(self.conn.get_xfrstate()))
+ # handle_rr should have caused commit, and the buffer should now be
+ # empty.
+ self.assertEqual([], self.conn._diff.get_buffer())
+
+ def test_handle_new_delete(self):
+ self.conn._end_serial = 1234
+ # SOA RR whose serial is the current one means we are going to a new
+ # difference, starting with removing that SOA.
+ self.conn._diff.add_data(self.ns_rrset) # put some dummy change
+ self.assertFalse(self.state.handle_rr(self.conn, self.begin_soa))
+ self.assertEqual([], self.conn._diff.get_buffer())
+ self.assertEqual(type(XfrinIXFRDeleteSOA()),
+ type(self.conn.get_xfrstate()))
+
+ def test_handle_out_of_sync(self):
+ # getting SOA with an inconsistent serial. This is an error.
+ self.conn._end_serial = 1235
+ self.assertRaises(XfrinProtocolError, self.state.handle_rr,
+ self.conn, soa_rrset)
+
+ def test_finish_message(self):
+ self.assertTrue(self.state.finish_message(self.conn))
+
+class TestXfrinIXFREnd(TestXfrinState):
+ def setUp(self):
+ super().setUp()
+ self.state = XfrinIXFREnd()
+
+ def test_handle_rr(self):
+ self.assertRaises(XfrinProtocolError, self.state.handle_rr, self.conn,
+ self.ns_rrset)
+
+ def test_finish_message(self):
+ self.assertFalse(self.state.finish_message(self.conn))
+
+class TestXfrinAXFR(TestXfrinState):
+ def setUp(self):
+ super().setUp()
+ self.state = XfrinAXFR()
+
+ def test_handle_rr(self):
+ """
+ Test we can put data inside.
+ """
+ # Put some data inside
+ self.assertTrue(self.state.handle_rr(self.conn, self.a_rrset))
+ # This test uses internal Diff structure to check the behaviour of
+ # XfrinAXFR. Maybe there could be a cleaner way, but it would be more
+ # complicated.
+ self.assertEqual([('add', self.a_rrset)], self.conn._diff.get_buffer())
+ # This SOA terminates the transfer
+ self.assertTrue(self.state.handle_rr(self.conn, soa_rrset))
+ # It should have changed the state
+ self.assertEqual(type(XfrinAXFREnd()), type(self.conn.get_xfrstate()))
+ # At this point, the data haven't been committed yet
+ self.assertEqual([('add', self.a_rrset), ('add', soa_rrset)],
+ self.conn._diff.get_buffer())
+
+ def test_finish_message(self):
+ """
+ Check normal end of message.
+ """
+ # When a message ends, nothing happens usually
+ self.assertTrue(self.state.finish_message(self.conn))
+
+class TestXfrinAXFREnd(TestXfrinState):
+ def setUp(self):
+ super().setUp()
+ self.state = XfrinAXFREnd()
+
+ def test_handle_rr(self):
+ self.assertRaises(XfrinProtocolError, self.state.handle_rr, self.conn,
+ self.ns_rrset)
+
+ def test_finish_message(self):
+ self.conn._diff.add_data(self.a_rrset)
+ self.conn._diff.add_data(soa_rrset)
+ self.assertFalse(self.state.finish_message(self.conn))
+
+ # The data should have been committed
+ self.assertEqual([], self.conn._diff.get_buffer())
+ check_diffs(self.assertEqual, [[('add', self.a_rrset),
+ ('add', soa_rrset)]],
+ self.conn._datasrc_client.committed_diffs)
+ self.assertRaises(ValueError, self.conn._diff.commit)
+
class TestXfrinConnection(unittest.TestCase):
+ '''Convenient parent class for XFR-protocol tests.
+
+ This class provides common setups and helper methods for protocol related
+ tests on AXFR and IXFR.
+
+ '''
+
def setUp(self):
if os.path.exists(TEST_DB_FILE):
os.remove(TEST_DB_FILE)
self.sock_map = {}
- self.conn = MockXfrinConnection(self.sock_map, 'example.com.',
+ self.conn = MockXfrinConnection(self.sock_map, TEST_ZONE_NAME,
TEST_RRCLASS, TEST_DB_FILE,
threading.Event(),
TEST_MASTER_IPV4_ADDRINFO)
@@ -201,6 +586,82 @@ class TestXfrinConnection(unittest.TestCase):
if os.path.exists(TEST_DB_FILE):
os.remove(TEST_DB_FILE)
+ def _handle_xfrin_response(self):
+ # This helper method iterates over all RRs (excluding the ending SOA)
+ # transferred, and simply returns the number of RRs. The return value
+ # may be used as an assertion value for test cases.
+ rrs = 0
+ for rr in self.conn._handle_axfrin_response():
+ rrs += 1
+ return rrs
+
+ def _create_normal_response_data(self):
+ # This helper method creates a simple sequence of DNS messages that
+ # forms a valid AXFR transaction. It consists of two messages, each
+ # containing just a single SOA RR.
+ tsig_1st = self.axfr_response_params['tsig_1st']
+ tsig_2nd = self.axfr_response_params['tsig_2nd']
+ self.conn.reply_data = self.conn.create_response_data(tsig_ctx=tsig_1st)
+ self.conn.reply_data += \
+ self.conn.create_response_data(tsig_ctx=tsig_2nd)
+
+ def _create_soa_response_data(self):
+ # This helper method creates a DNS message that is supposed to be
+ # used as a valid response to SOA queries prior to XFR.
+ # If tsig is True, it tries to verify the query with a locally
+ # created TSIG context (which may or may not succeed) so that the
+ # response will include a TSIG.
+ # If axfr_after_soa is True, it resets the response_generator so that
+ # a valid XFR messages will follow.
+
+ verify_ctx = None
+ if self.soa_response_params['tsig']:
+ # xfrin (currently) always uses TCP. strip off the length field.
+ query_data = self.conn.query_data[2:]
+ query_message = Message(Message.PARSE)
+ query_message.from_wire(query_data)
+ verify_ctx = TSIGContext(TSIG_KEY)
+ verify_ctx.verify(query_message.get_tsig_record(), query_data)
+
+ self.conn.reply_data = self.conn.create_response_data(
+ bad_qid=self.soa_response_params['bad_qid'],
+ response=self.soa_response_params['response'],
+ rcode=self.soa_response_params['rcode'],
+ questions=self.soa_response_params['questions'],
+ tsig_ctx=verify_ctx)
+ if self.soa_response_params['axfr_after_soa'] != None:
+ self.conn.response_generator = \
+ self.soa_response_params['axfr_after_soa']
+
+ def _create_broken_response_data(self):
+ # This helper method creates a bogus "DNS message" that only contains
+ # 4 octets of data. The DNS message parser will raise an exception.
+ bogus_data = b'xxxx'
+ self.conn.reply_data = struct.pack('H', socket.htons(len(bogus_data)))
+ self.conn.reply_data += bogus_data
+
+ def _create_a(self, address):
+ rrset = RRset(Name('a.example.com'), TEST_RRCLASS, RRType.A(),
+ RRTTL(3600))
+ rrset.add_rdata(Rdata(RRType.A(), TEST_RRCLASS, address))
+ return rrset
+
+ def _create_soa(self, serial):
+ rrset = RRset(TEST_ZONE_NAME, TEST_RRCLASS, RRType.SOA(),
+ RRTTL(3600))
+ rdata_str = 'm. r. ' + serial + ' 3600 1800 2419200 7200'
+ rrset.add_rdata(Rdata(RRType.SOA(), TEST_RRCLASS, rdata_str))
+ return rrset
+
+ def _create_ns(self, nsname='ns.'+TEST_ZONE_NAME_STR):
+ rrset = RRset(TEST_ZONE_NAME, TEST_RRCLASS, RRType.NS(), RRTTL(3600))
+ rrset.add_rdata(Rdata(RRType.NS(), TEST_RRCLASS, nsname))
+ return rrset
+
+class TestAXFR(TestXfrinConnection):
+ def setUp(self):
+ super().setUp()
+
def __create_mock_tsig(self, key, error):
# This helper function creates a MockTSIGContext for a given key
# and TSIG error to be used as a result of verify (normally faked
@@ -236,31 +697,82 @@ class TestXfrinConnection(unittest.TestCase):
# to confirm an AF_INET6 socket has been created. A naive application
# tends to assume it's IPv4 only and hardcode AF_INET. This test
# uncovers such a bug.
- c = MockXfrinConnection({}, 'example.com.', TEST_RRCLASS, TEST_DB_FILE,
+ c = MockXfrinConnection({}, TEST_ZONE_NAME, TEST_RRCLASS, TEST_DB_FILE,
threading.Event(),
TEST_MASTER_IPV6_ADDRINFO)
c.bind(('::', 0))
c.close()
def test_init_chclass(self):
- c = XfrinConnection({}, 'example.com.', RRClass.CH(), TEST_DB_FILE,
- threading.Event(), TEST_MASTER_IPV4_ADDRINFO)
+ c = MockXfrinConnection({}, TEST_ZONE_NAME, RRClass.CH(), TEST_DB_FILE,
+ threading.Event(), TEST_MASTER_IPV4_ADDRINFO)
axfrmsg = c._create_query(RRType.AXFR())
self.assertEqual(axfrmsg.get_question()[0].get_class(),
RRClass.CH())
c.close()
- def test_send_query(self):
- def create_msg(query_type):
- msg = Message(Message.RENDER)
- query_id = 0x1035
- msg.set_qid(query_id)
- msg.set_opcode(Opcode.QUERY())
- msg.set_rcode(Rcode.NOERROR())
- query_question = Question(Name("example.com."), RRClass.IN(), query_type)
- msg.add_question(query_question)
- return msg
+ def test_create_query(self):
+ def check_query(expected_qtype, expected_auth):
+ '''Helper method to repeat the same pattern of tests'''
+ self.assertEqual(Opcode.QUERY(), msg.get_opcode())
+ self.assertEqual(Rcode.NOERROR(), msg.get_rcode())
+ self.assertEqual(1, msg.get_rr_count(Message.SECTION_QUESTION))
+ self.assertEqual(TEST_ZONE_NAME, msg.get_question()[0].get_name())
+ self.assertEqual(expected_qtype, msg.get_question()[0].get_type())
+ self.assertEqual(0, msg.get_rr_count(Message.SECTION_ANSWER))
+ self.assertEqual(0, msg.get_rr_count(Message.SECTION_ADDITIONAL))
+ if expected_auth is None:
+ self.assertEqual(0,
+ msg.get_rr_count(Message.SECTION_AUTHORITY))
+ else:
+ self.assertEqual(1,
+ msg.get_rr_count(Message.SECTION_AUTHORITY))
+ auth_rr = msg.get_section(Message.SECTION_AUTHORITY)[0]
+ self.assertEqual(expected_auth.get_name(), auth_rr.get_name())
+ self.assertEqual(expected_auth.get_type(), auth_rr.get_type())
+ self.assertEqual(expected_auth.get_class(),
+ auth_rr.get_class())
+ # In our test scenario RDATA must be 1
+ self.assertEqual(1, expected_auth.get_rdata_count())
+ self.assertEqual(1, auth_rr.get_rdata_count())
+ self.assertEqual(expected_auth.get_rdata()[0],
+ auth_rr.get_rdata()[0])
+
+ # Actual tests start here
+ # SOA query
+ msg = self.conn._create_query(RRType.SOA())
+ check_query(RRType.SOA(), None)
+
+ # AXFR query
+ msg = self.conn._create_query(RRType.AXFR())
+ check_query(RRType.AXFR(), None)
+
+ # IXFR query
+ msg = self.conn._create_query(RRType.IXFR())
+ check_query(RRType.IXFR(), begin_soa_rrset)
+ self.assertEqual(1230, self.conn._request_serial)
+
+ def test_create_ixfr_query_fail(self):
+ # In these cases _create_query() will fail to find a valid SOA RR to
+ # insert in the IXFR query, and should raise an exception.
+
+ self.conn._zone_name = Name('no-such-zone.example')
+ self.assertRaises(XfrinException, self.conn._create_query,
+ RRType.IXFR())
+
+ self.conn._zone_name = Name('partial-match-zone.example')
+ self.assertRaises(XfrinException, self.conn._create_query,
+ RRType.IXFR())
+
+ self.conn._zone_name = Name('no-soa.example')
+ self.assertRaises(XfrinException, self.conn._create_query,
+ RRType.IXFR())
+
+ self.conn._zone_name = Name('dup-soa.example')
+ self.assertRaises(XfrinException, self.conn._create_query,
+ RRType.IXFR())
+ def test_send_query(self):
def message_has_tsig(data):
# a simple check if the actual data contains a TSIG RR.
# At our level this simple check should suffice; other detailed
@@ -269,14 +781,6 @@ class TestXfrinConnection(unittest.TestCase):
msg.from_wire(data)
return msg.get_tsig_record() is not None
- self.conn._create_query = create_msg
- # soa request
- self.conn._send_query(RRType.SOA())
- self.assertEqual(self.conn.query_data, b'\x00\x1d\x105\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x07example\x03com\x00\x00\x06\x00\x01')
- # axfr request
- self.conn._send_query(RRType.AXFR())
- self.assertEqual(self.conn.query_data, b'\x00\x1d\x105\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x07example\x03com\x00\x00\xfc\x00\x01')
-
# soa request with tsig
self.conn._tsig_key = TSIG_KEY
self.conn._send_query(RRType.SOA())
@@ -467,7 +971,7 @@ class TestXfrinConnection(unittest.TestCase):
self.conn._send_query(RRType.AXFR())
self.assertRaises(Exception, self._handle_xfrin_response)
- def test_response(self):
+ def test_axfr_response(self):
# normal case.
self.conn.response_generator = self._create_normal_response_data
self.conn._send_query(RRType.AXFR())
@@ -598,10 +1102,7 @@ class TestXfrinConnection(unittest.TestCase):
def test_do_soacheck_broken_response(self):
self.conn.response_generator = self._create_broken_response_data
- # XXX: TODO: this test failed here, should xfr not raise an
- # exception but simply drop and return FAIL?
- #self.assertEqual(self.conn.do_xfrin(True), XFRIN_FAIL)
- self.assertRaises(MessageTooShort, self.conn.do_xfrin, True)
+ self.assertEqual(self.conn.do_xfrin(True), XFRIN_FAIL)
def test_do_soacheck_badqid(self):
# the QID mismatch would internally trigger a XfrinException exception,
@@ -610,59 +1111,334 @@ class TestXfrinConnection(unittest.TestCase):
self.conn.response_generator = self._create_soa_response_data
self.assertEqual(self.conn.do_xfrin(True), XFRIN_FAIL)
- def _handle_xfrin_response(self):
- # This helper methods iterates over all RRs (excluding the ending SOA)
- # transferred, and simply returns the number of RRs. The return value
- # may be used an assertion value for test cases.
- rrs = 0
- for rr in self.conn._handle_xfrin_response():
- rrs += 1
- return rrs
+class TestIXFRResponse(TestXfrinConnection):
+ def setUp(self):
+ super().setUp()
+ self.conn._query_id = self.conn.qid = 1035
+ self.conn._request_serial = 1230
+ self.conn._request_type = RRType.IXFR()
+ self._zone_name = TEST_ZONE_NAME
+ self.conn._datasrc_client = MockDataSourceClient()
+ XfrinInitialSOA().set_xfrstate(self.conn, XfrinInitialSOA())
- def _create_normal_response_data(self):
- # This helper method creates a simple sequence of DNS messages that
- # forms a valid XFR transaction. It consists of two messages, each
- # containing just a single SOA RR.
- tsig_1st = self.axfr_response_params['tsig_1st']
- tsig_2nd = self.axfr_response_params['tsig_2nd']
- self.conn.reply_data = self.conn.create_response_data(tsig_ctx=tsig_1st)
- self.conn.reply_data += \
- self.conn.create_response_data(tsig_ctx=tsig_2nd)
+ def test_ixfr_response(self):
+ '''A simplest form of IXFR response.
- def _create_soa_response_data(self):
- # This helper method creates a DNS message that is supposed to be
- # used a valid response to SOA queries prior to XFR.
- # If tsig is True, it tries to verify the query with a locally
- # created TSIG context (which may or may not succeed) so that the
- # response will include a TSIG.
- # If axfr_after_soa is True, it resets the response_generator so that
- # a valid XFR messages will follow.
-
- verify_ctx = None
- if self.soa_response_params['tsig']:
- # xfrin (curreently) always uses TCP. strip off the length field.
- query_data = self.conn.query_data[2:]
- query_message = Message(Message.PARSE)
- query_message.from_wire(query_data)
- verify_ctx = TSIGContext(TSIG_KEY)
- verify_ctx.verify(query_message.get_tsig_record(), query_data)
+ It simply updates the zone's SOA one time.
+ '''
self.conn.reply_data = self.conn.create_response_data(
- bad_qid=self.soa_response_params['bad_qid'],
- response=self.soa_response_params['response'],
- rcode=self.soa_response_params['rcode'],
- questions=self.soa_response_params['questions'],
- tsig_ctx=verify_ctx)
- if self.soa_response_params['axfr_after_soa'] != None:
- self.conn.response_generator = \
- self.soa_response_params['axfr_after_soa']
+ questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS, RRType.IXFR())],
+ answers=[soa_rrset, begin_soa_rrset, soa_rrset, soa_rrset])
+ self.conn._handle_xfrin_responses()
+ self.assertEqual(type(XfrinIXFREnd()), type(self.conn.get_xfrstate()))
+ self.assertEqual([], self.conn._datasrc_client.diffs)
+ check_diffs(self.assertEqual,
+ [[('delete', begin_soa_rrset), ('add', soa_rrset)]],
+ self.conn._datasrc_client.committed_diffs)
+
+ def test_ixfr_response_multi_sequences(self):
+ '''Similar to the previous case, but with multiple diff seqs.
+
+ '''
+ self.conn.reply_data = self.conn.create_response_data(
+ questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS, RRType.IXFR())],
+ answers=[soa_rrset,
+ # removing one A in serial 1230
+ begin_soa_rrset, self._create_a('192.0.2.1'),
+ # adding one A in serial 1231
+ self._create_soa('1231'), self._create_a('192.0.2.2'),
+ # removing one A in serial 1231
+ self._create_soa('1231'), self._create_a('192.0.2.3'),
+ # adding one A in serial 1232
+ self._create_soa('1232'), self._create_a('192.0.2.4'),
+ # removing one A in serial 1232
+ self._create_soa('1232'), self._create_a('192.0.2.5'),
+ # adding one A in serial 1234
+ soa_rrset, self._create_a('192.0.2.6'),
+ soa_rrset])
+ self.conn._handle_xfrin_responses()
+ self.assertEqual(type(XfrinIXFREnd()), type(self.conn.get_xfrstate()))
+ self.assertEqual([], self.conn._datasrc_client.diffs)
+ check_diffs(self.assertEqual,
+ [[('delete', begin_soa_rrset),
+ ('delete', self._create_a('192.0.2.1')),
+ ('add', self._create_soa('1231')),
+ ('add', self._create_a('192.0.2.2'))],
+ [('delete', self._create_soa('1231')),
+ ('delete', self._create_a('192.0.2.3')),
+ ('add', self._create_soa('1232')),
+ ('add', self._create_a('192.0.2.4'))],
+ [('delete', self._create_soa('1232')),
+ ('delete', self._create_a('192.0.2.5')),
+ ('add', soa_rrset),
+ ('add', self._create_a('192.0.2.6'))]],
+ self.conn._datasrc_client.committed_diffs)
+
+ def test_ixfr_response_multi_messages(self):
+ '''Similar to the first case, but RRs span over multiple messages.
+
+ '''
+ self.conn.reply_data = self.conn.create_response_data(
+ questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS, RRType.IXFR())],
+ answers=[soa_rrset, begin_soa_rrset, soa_rrset])
+ self.conn.reply_data += self.conn.create_response_data(
+ questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS, RRType.IXFR())],
+ answers=[soa_rrset])
+ self.conn._handle_xfrin_responses()
+ self.assertEqual(type(XfrinIXFREnd()), type(self.conn.get_xfrstate()))
+ check_diffs(self.assertEqual,
+ [[('delete', begin_soa_rrset), ('add', soa_rrset)]],
+ self.conn._datasrc_client.committed_diffs)
+
+ def test_ixfr_response_broken(self):
+ '''Test with a broken response.
+
+ '''
+ # SOA sequence is out-of-sync
+ self.conn.reply_data = self.conn.create_response_data(
+ questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS, RRType.IXFR())],
+ answers=[soa_rrset, begin_soa_rrset, soa_rrset,
+ self._create_soa('1235')])
+ self.assertRaises(XfrinProtocolError,
+ self.conn._handle_xfrin_responses)
+ # no diffs should have been committed
+ check_diffs(self.assertEqual,
+ [], self.conn._datasrc_client.committed_diffs)
+
+ def test_ixfr_response_extra(self):
+ '''Test with an extra RR after the end of IXFR diff sequences.
+
+ IXFR should be rejected, but complete diff sequences should be
+ committed; it's not clear whether it's compliant to the protocol
+ specification, but it is how BIND 9 works and we do the same.
+ '''
+ self.conn.reply_data = self.conn.create_response_data(
+ questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS, RRType.IXFR())],
+ answers=[soa_rrset, begin_soa_rrset, soa_rrset, soa_rrset,
+ self._create_a('192.0.2.1')])
+ self.assertRaises(XfrinProtocolError,
+ self.conn._handle_xfrin_responses)
+ check_diffs(self.assertEqual,
+ [[('delete', begin_soa_rrset), ('add', soa_rrset)]],
+ self.conn._datasrc_client.committed_diffs)
+
+ def test_ixfr_to_axfr_response(self):
+ '''AXFR-style IXFR response.
+
+ It simply updates the zone's SOA one time.
+
+ '''
+ ns_rr = self._create_ns()
+ a_rr = self._create_a('192.0.2.1')
+ self.conn.reply_data = self.conn.create_response_data(
+ questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS, RRType.IXFR())],
+ answers=[soa_rrset, ns_rr, a_rr, soa_rrset])
+ self.conn._handle_xfrin_responses()
+ self.assertEqual(type(XfrinAXFREnd()), type(self.conn.get_xfrstate()))
+ self.assertEqual([], self.conn._datasrc_client.diffs)
+ # The SOA should be added exactly once, and in our implementation
+ # it should be added at the end of the sequence.
+ check_diffs(self.assertEqual,
+ [[('add', ns_rr), ('add', a_rr), ('add', soa_rrset)]],
+ self.conn._datasrc_client.committed_diffs)
+
+ def test_ixfr_to_axfr_response_mismatch_soa(self):
+ '''AXFR-style IXFR response, but the two SOA are not the same.
+
+ In the current implementation, we accept it and use the second SOA.
+
+ '''
+ ns_rr = self._create_ns()
+ a_rr = self._create_a('192.0.2.1')
+ self.conn.reply_data = self.conn.create_response_data(
+ questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS, RRType.IXFR())],
+ answers=[soa_rrset, ns_rr, a_rr, begin_soa_rrset])
+ self.conn._handle_xfrin_responses()
+ self.assertEqual(type(XfrinAXFREnd()), type(self.conn.get_xfrstate()))
+ self.assertEqual([], self.conn._datasrc_client.diffs)
+ check_diffs(self.assertEqual,
+ [[('add', ns_rr), ('add', a_rr),
+ ('add', begin_soa_rrset)]],
+ self.conn._datasrc_client.committed_diffs)
+
+ def test_ixfr_to_axfr_response_extra(self):
+ '''Test with an extra RR after the end of AXFR-style IXFR session.
+
+ The session should be rejected, and nothing should be committed.
+
+ '''
+ ns_rr = self._create_ns()
+ a_rr = self._create_a('192.0.2.1')
+ self.conn.reply_data = self.conn.create_response_data(
+ questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS, RRType.IXFR())],
+ answers=[soa_rrset, ns_rr, a_rr, soa_rrset, a_rr])
+ self.assertRaises(XfrinProtocolError,
+ self.conn._handle_xfrin_responses)
+ self.assertEqual(type(XfrinAXFREnd()), type(self.conn.get_xfrstate()))
+ self.assertEqual([], self.conn._datasrc_client.committed_diffs)
+
+class TestIXFRSession(TestXfrinConnection):
+ '''Tests for a full IXFR session (query and response).
+
+ Detailed corner cases should have been covered in test_create_query()
+ and TestIXFRResponse, so we'll only check some typical cases to confirm
+ the general logic flow.
+ '''
+ def setUp(self):
+ super().setUp()
- def _create_broken_response_data(self):
- # This helper method creates a bogus "DNS message" that only contains
- # 4 octets of data. The DNS message parser will raise an exception.
- bogus_data = b'xxxx'
- self.conn.reply_data = struct.pack('H', socket.htons(len(bogus_data)))
- self.conn.reply_data += bogus_data
+ def test_do_xfrin(self):
+ def create_ixfr_response():
+ self.conn.reply_data = self.conn.create_response_data(
+ questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS,
+ RRType.IXFR())],
+ answers=[soa_rrset, begin_soa_rrset, soa_rrset, soa_rrset])
+ self.conn.response_generator = create_ixfr_response
+ self.assertEqual(XFRIN_OK, self.conn.do_xfrin(False, RRType.IXFR()))
+
+ # Check some details of the IXFR protocol processing
+ self.assertEqual(type(XfrinIXFREnd()), type(self.conn.get_xfrstate()))
+ check_diffs(self.assertEqual,
+ [[('delete', begin_soa_rrset), ('add', soa_rrset)]],
+ self.conn._datasrc_client.committed_diffs)
+
+ # Check if the query was IXFR.
+ qdata = self.conn.query_data[2:]
+ qmsg = Message(Message.PARSE)
+ qmsg.from_wire(qdata, len(qdata))
+ self.assertEqual(1, qmsg.get_rr_count(Message.SECTION_QUESTION))
+ self.assertEqual(TEST_ZONE_NAME, qmsg.get_question()[0].get_name())
+ self.assertEqual(RRType.IXFR(), qmsg.get_question()[0].get_type())
+
+ def test_do_xfrin_fail(self):
+ '''IXFR fails due to a protocol error.
+
+ '''
+ def create_ixfr_response():
+ self.conn.reply_data = self.conn.create_response_data(
+ questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS,
+ RRType.IXFR())],
+ answers=[soa_rrset, begin_soa_rrset, soa_rrset,
+ self._create_soa('1235')])
+ self.conn.response_generator = create_ixfr_response
+ self.assertEqual(XFRIN_FAIL, self.conn.do_xfrin(False, RRType.IXFR()))
+
+ def test_do_xfrin_fail(self):
+ '''IXFR fails due to a bogus DNS message.
+
+ '''
+ self._create_broken_response_data()
+ self.assertEqual(XFRIN_FAIL, self.conn.do_xfrin(False, RRType.IXFR()))
+
+class TestIXFRSessionWithSQLite3(TestXfrinConnection):
+ '''Tests for IXFR sessions using an SQLite3 DB.
+
+ These are provided mainly to confirm the implementation actually works
+ in an environment closer to actual operational environments. So we
+ only check a few common cases; other details are tested using mock
+ data sources.
+
+ '''
+ def setUp(self):
+ self.sqlite3db_src = TESTDATA_SRCDIR + '/example.com.sqlite3'
+ self.sqlite3db_obj = TESTDATA_OBJDIR + '/example.com.sqlite3.copy'
+ super().setUp()
+ if os.path.exists(self.sqlite3db_obj):
+ os.unlink(self.sqlite3db_obj)
+ shutil.copyfile(self.sqlite3db_src, self.sqlite3db_obj)
+ self.conn._datasrc_client = DataSourceClient(self.sqlite3db_obj)
+
+ def tearDown(self):
+ if os.path.exists(self.sqlite3db_obj):
+ os.unlink(self.sqlite3db_obj)
+
+ def get_zone_serial(self):
+ result, finder = self.conn._datasrc_client.find_zone(TEST_ZONE_NAME)
+ self.assertEqual(DataSourceClient.SUCCESS, result)
+ result, soa = finder.find(TEST_ZONE_NAME, RRType.SOA(),
+ None, ZoneFinder.FIND_DEFAULT)
+ self.assertEqual(ZoneFinder.SUCCESS, result)
+ self.assertEqual(1, soa.get_rdata_count())
+ return get_soa_serial(soa.get_rdata()[0])
+
+ def record_exist(self, name, type):
+ result, finder = self.conn._datasrc_client.find_zone(TEST_ZONE_NAME)
+ self.assertEqual(DataSourceClient.SUCCESS, result)
+ result, soa = finder.find(name, type, None, ZoneFinder.FIND_DEFAULT)
+ return result == ZoneFinder.SUCCESS
+
+ def test_do_xfrin_sqlite3(self):
+ def create_ixfr_response():
+ self.conn.reply_data = self.conn.create_response_data(
+ questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS,
+ RRType.IXFR())],
+ answers=[soa_rrset, begin_soa_rrset, soa_rrset, soa_rrset])
+ self.conn.response_generator = create_ixfr_response
+
+ # Confirm xfrin succeeds and SOA is updated
+ self.assertEqual(1230, self.get_zone_serial())
+ self.assertEqual(XFRIN_OK, self.conn.do_xfrin(False, RRType.IXFR()))
+ self.assertEqual(1234, self.get_zone_serial())
+
+ def test_do_xfrin_sqlite3_fail(self):
+ '''Similar to the previous test, but xfrin fails due to error.
+
+ Check the DB is not changed.
+
+ '''
+ def create_ixfr_response():
+ self.conn.reply_data = self.conn.create_response_data(
+ questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS,
+ RRType.IXFR())],
+ answers=[soa_rrset, begin_soa_rrset, soa_rrset,
+ self._create_soa('1235')])
+ self.conn.response_generator = create_ixfr_response
+
+ self.assertEqual(1230, self.get_zone_serial())
+ self.assertEqual(XFRIN_FAIL, self.conn.do_xfrin(False, RRType.IXFR()))
+ self.assertEqual(1230, self.get_zone_serial())
+
+ def test_do_xfrin_axfr_sqlite3(self):
+ '''AXFR-style IXFR.
+
+ '''
+ def create_ixfr_response():
+ self.conn.reply_data = self.conn.create_response_data(
+ questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS,
+ RRType.IXFR())],
+ answers=[soa_rrset, self._create_ns(), soa_rrset])
+ self.conn.response_generator = create_ixfr_response
+
+ # Confirm xfrin succeeds and SOA is updated, A RR is deleted.
+ self.assertEqual(1230, self.get_zone_serial())
+ self.assertTrue(self.record_exist(Name('dns01.example.com'),
+ RRType.A()))
+ self.assertEqual(XFRIN_OK, self.conn.do_xfrin(False, RRType.IXFR()))
+ self.assertEqual(1234, self.get_zone_serial())
+ self.assertFalse(self.record_exist(Name('dns01.example.com'),
+ RRType.A()))
+
+ def test_do_xfrin_axfr_sqlite3_fail(self):
+ '''Similar to the previous test, but xfrin fails due to error.
+
+ Check the DB is not changed.
+
+ '''
+ def create_ixfr_response():
+ self.conn.reply_data = self.conn.create_response_data(
+ questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS,
+ RRType.IXFR())],
+ answers=[soa_rrset, self._create_ns(), soa_rrset, soa_rrset])
+ self.conn.response_generator = create_ixfr_response
+
+ self.assertEqual(1230, self.get_zone_serial())
+ self.assertTrue(self.record_exist(Name('dns01.example.com'),
+ RRType.A()))
+ self.assertEqual(XFRIN_FAIL, self.conn.do_xfrin(False, RRType.IXFR()))
+ self.assertEqual(1230, self.get_zone_serial())
+ self.assertTrue(self.record_exist(Name('dns01.example.com'),
+ RRType.A()))
class TestXfrinRecorder(unittest.TestCase):
def setUp(self):
@@ -789,6 +1565,8 @@ class TestXfrin(unittest.TestCase):
self.args)['result'][0], 0)
self.assertEqual(self.args['master'], self.xfr.xfrin_started_master_addr)
self.assertEqual(int(self.args['port']), self.xfr.xfrin_started_master_port)
+ # By default we use AXFR (for now)
+ self.assertEqual(RRType.AXFR(), self.xfr.xfrin_started_request_type)
def test_command_handler_retransfer_short_command1(self):
# try it when only specifying the zone name (of unknown zone)
@@ -901,6 +1679,8 @@ class TestXfrin(unittest.TestCase):
self.xfr.xfrin_started_master_addr)
self.assertEqual(int(TEST_MASTER_PORT),
self.xfr.xfrin_started_master_port)
+ # By default we use AXFR (for now)
+ self.assertEqual(RRType.AXFR(), self.xfr.xfrin_started_request_type)
def test_command_handler_notify(self):
# at this level, refresh is no different than retransfer.
@@ -955,13 +1735,20 @@ class TestXfrin(unittest.TestCase):
self.assertEqual(zone_info.tsig_key.to_text(), TSIGKey(zone_config['tsig_key']).to_text())
else:
self.assertIsNone(zone_info.tsig_key)
+ if 'ixfr_disabled' in zone_config and\
+ zone_config.get('ixfr_disabled'):
+ self.assertTrue(zone_info.ixfr_disabled)
+ else:
+ # if not set, should default to False
+ self.assertFalse(zone_info.ixfr_disabled)
def test_command_handler_zones(self):
config1 = { 'transfers_in': 3,
'zones': [
{ 'name': 'test.example.',
'master_addr': '192.0.2.1',
- 'master_port': 53
+ 'master_port': 53,
+ 'ixfr_disabled': False
}
]}
self.assertEqual(self.xfr.config_handler(config1)['result'][0], 0)
@@ -972,7 +1759,8 @@ class TestXfrin(unittest.TestCase):
{ 'name': 'test.example.',
'master_addr': '192.0.2.2',
'master_port': 53,
- 'tsig_key': "example.com:SFuWd/q99SzF8Yzd1QbB9g=="
+ 'tsig_key': "example.com:SFuWd/q99SzF8Yzd1QbB9g==",
+ 'ixfr_disabled': True
}
]}
self.assertEqual(self.xfr.config_handler(config2)['result'][0], 0)
@@ -1082,6 +1870,38 @@ class TestXfrin(unittest.TestCase):
# since this has failed, we should still have the previous config
self._check_zones_config(config2)
+ def common_ixfr_setup(self, xfr_mode, ixfr_disabled):
+ # This helper method explicitly sets up a zone configuration with
+ # ixfr_disabled, and invokes either retransfer or refresh.
+ # Shared by some of the following test cases.
+ config = {'zones': [
+ {'name': 'example.com.',
+ 'master_addr': '192.0.2.1',
+ 'ixfr_disabled': ixfr_disabled}]}
+ self.assertEqual(self.xfr.config_handler(config)['result'][0], 0)
+ self.assertEqual(self.xfr.command_handler(xfr_mode,
+ self.args)['result'][0], 0)
+
+ def test_command_handler_retransfer_ixfr_enabled(self):
+ # If IXFR is explicitly enabled in config, IXFR will be used
+ self.common_ixfr_setup('retransfer', False)
+ self.assertEqual(RRType.IXFR(), self.xfr.xfrin_started_request_type)
+
+ def test_command_handler_refresh_ixfr_enabled(self):
+ # Same for refresh
+ self.common_ixfr_setup('refresh', False)
+ self.assertEqual(RRType.IXFR(), self.xfr.xfrin_started_request_type)
+
+ def test_command_handler_retransfer_ixfr_disabled(self):
+ # Similar to the previous case, but explicitly disabled. AXFR should
+ # be used.
+ self.common_ixfr_setup('retransfer', True)
+ self.assertEqual(RRType.AXFR(), self.xfr.xfrin_started_request_type)
+
+ def test_command_handler_refresh_ixfr_disabled(self):
+ # Same for refresh
+ self.common_ixfr_setup('refresh', True)
+ self.assertEqual(RRType.AXFR(), self.xfr.xfrin_started_request_type)
def raise_interrupt():
raise KeyboardInterrupt()
diff --git a/src/bin/xfrin/xfrin.py.in b/src/bin/xfrin/xfrin.py.in
index 07de8f0..5887801 100755
--- a/src/bin/xfrin/xfrin.py.in
+++ b/src/bin/xfrin/xfrin.py.in
@@ -28,8 +28,10 @@ from optparse import OptionParser, OptionValueError
from isc.config.ccsession import *
from isc.notify import notify_out
import isc.util.process
+from isc.datasrc import DataSourceClient, ZoneFinder
import isc.net.parse
-from xfrin_messages import *
+from isc.xfrin.diff import Diff
+from isc.log_messages.xfrin_messages import *
isc.log.init("b10-xfrin")
logger = isc.log.Logger("xfrin")
@@ -62,6 +64,9 @@ ZONE_MANAGER_MODULE_NAME = 'Zonemgr'
REFRESH_FROM_ZONEMGR = 'refresh_from_zonemgr'
ZONE_XFRIN_FAILED = 'zone_xfrin_failed'
+# Constants for debug levels, to be removed when we have #1074.
+DBG_XFRIN_TRACE = 3
+
# These two default are currently hard-coded. For config this isn't
# necessary, but we need these defaults for optional command arguments
# (TODO: have similar support to get default values for command
@@ -77,6 +82,11 @@ XFRIN_FAIL = 1
class XfrinException(Exception):
pass
+class XfrinProtocolError(Exception):
+ '''An exception raised for errors encountered in xfrin protocol handling.
+ '''
+ pass
+
class XfrinZoneInfoException(Exception):
"""This exception is raised if there is an error in the given
configuration (part), or when a command does not have a required
@@ -112,24 +122,350 @@ def _check_zone_class(zone_class_str):
except InvalidRRClass as irce:
raise XfrinZoneInfoException("bad zone class: " + zone_class_str + " (" + str(irce) + ")")
+def get_soa_serial(soa_rdata):
+ '''Extract the serial field of an SOA RDATA and return it as an integer.
+
+ We don't have to be very efficient here, so we first dump the entire RDATA
+ as a string and convert the first corresponding field. This should be
+ sufficient in practice, but may not always work when the MNAME or RNAME
+ contains an (escaped) space character in their labels. Ideally there
+ should be a more direct and convenient way to get access to the SOA
+ fields.
+ '''
+ return int(soa_rdata.to_text().split()[2])
+
+class XfrinState:
+ '''
+ The states of the incoming *XFR state machine.
+
+ We (will) handle both IXFR and AXFR with a single integrated state
+ machine because they cannot be distinguished immediately - an AXFR
+ response to an IXFR request can only be detected when the first two (2)
+ response RRs have already been received.
+
+ The following diagram summarizes the state transition. After sending
+ the query, xfrin starts the process with the InitialSOA state (all
+ IXFR/AXFR response begins with an SOA). When it reaches IXFREnd
+ or AXFREnd, the process successfully completes.
+
+
+ (recv SOA) (AXFR-style IXFR) (SOA, add)
+ InitialSOA------->FirstData------------->AXFR--------->AXFREnd
+ | | ^ (post xfr
+ | | | checks, then
+ | +--+ commit)
+ | (non SOA, add)
+ |
+ | (non SOA, delete)
+ (pure IXFR,| +-------+
+ keep handling)| (Delete SOA) V |
+ + ->IXFRDeleteSOA------>IXFRDelete--+
+ ^ |
+ (see SOA, not end, | (see SOA)|
+ commit, keep handling) | |
+ | V
+ +---------IXFRAdd<----------+IXFRAddSOA
+ (non SOA, add)| ^ | (Add SOA)
+ ----------+ |
+ |(see SOA w/ end serial, commit changes)
+ V
+ IXFREnd
+
+ Note that changes are committed for every "difference sequence"
+ (i.e. changes for one SOA update). This means when an IXFR response
+ contains multiple difference sequences and something goes wrong
+ after several commits, these changes have been published and visible
+ to clients even if the IXFR session is subsequently aborted.
+ It is not clear if this is valid in terms of the protocol specification.
+ Section 4 of RFC 1995 states:
+
+ An IXFR client, should only replace an older version with a newer
+ version after all the differences have been successfully processed.
+
+ If this "replacement" is for the changes of one difference sequence
+ and "all the differences" mean the changes for that sequence, this
+ implementation strictly follows what RFC states. If this is for
+ the entire IXFR response (that may contain multiple sequences),
+ we should implement it with one big transaction and one final commit
+ at the very end.
+
+ For now, we implement it with multiple smaller commits for two
+ reasons. First, this is what BIND 9 does, and we generally port
+ the implementation logic here. BIND 9 has been supporting IXFR
+ for many years, so the fact that it still behaves this way
+ probably means it at least doesn't cause a severe operational
+ problem in practice. Second, especially because BIND 10 would
+ often use a database backend, a larger transaction could cause
+ undesirable effects, e.g. suspending normal lookups for a longer
+ period depending on the characteristics of the database. Even if
+ we find something wrong in a later sequence and abort the
+ session, we can start another incremental update from what has
+ been validated, or we can switch to AXFR to replace the zone
+ completely.
+
+ This implementation uses the state design pattern, where each state
+ is represented as a subclass of the base XfrinState class. Each concrete
+ subclass of XfrinState is assumed to define two methods: handle_rr() and
+ finish_message(). These methods handle specific part of XFR protocols
+ and (if necessary) perform the state transition.
+
+ Conceptually, XfrinState and its subclasses are a "friend" of
+ XfrinConnection and are assumed to be allowed to access its internal
+ information (even though Python does not have a strict access control
+ between different classes).
+
+ The XfrinState and its subclasses are designed to be stateless, and
+ can be used as singleton objects. For now, however, we always instantiate
+ a new object for every state transition, partly because the introduction
+ of singleton will make a code bit complicated, and partly because
+ the overhead of object instantiation wouldn't be significant for xfrin.
+
+ '''
+ def set_xfrstate(self, conn, new_state):
+ '''Set the XfrConnection to a given new state.
+
+ As a "friend" class, this method intentionally gets access to the
+ connection's "private" method.
+
+ '''
+ conn._XfrinConnection__set_xfrstate(new_state)
+
+ def handle_rr(self, conn):
+ '''Handle one RR of an XFR response message.
+
+ Depending on the state, the RR is generally added or deleted in the
+ corresponding data source, or in some special cases indicates
+ a specifi transition, such as starting a new IXFR difference
+ sequence or completing the session.
+
+ All subclasses have their specific behaviors for this method, so
+ there is no default definition. If the base class version
+ is called, it's a bug of the caller, and it's notified via
+ an XfrinException exception.
+
+ This method returns a boolean value: True if the given RR was
+ fully handled and the caller should go to the next RR; False
+ if the caller needs to call this method with the (possibly) new
+ state for the same RR again.
+
+ '''
+ raise XfrinException("Internal bug: " +
+ "XfrinState.handle_rr() called directly")
+
+ def finish_message(self, conn):
+ '''Perform any final processing after handling all RRs of a response.
+
+ This method then returns a boolean indicating whether to continue
+ receiving the message. Unless it's in the end of the entire XFR
+ session, we should continue, so this default method simply returns
+ True.
+
+ '''
+ return True
+
+class XfrinInitialSOA(XfrinState):
+ def handle_rr(self, conn, rr):
+ if rr.get_type() != RRType.SOA():
+ raise XfrinProtocolError('First RR in zone transfer must be SOA ('
+ + rr.get_type().to_text() + ' received)')
+ conn._end_serial = get_soa_serial(rr.get_rdata()[0])
+
+ # FIXME: we need to check the serial is actually greater than ours.
+ # To do so, however, we need to implement serial number arithmetic.
+ # Although it wouldn't be a big task, we'll leave it for a separate
+ # task for now. (Always performing xfr could be inefficient, but
+ # shouldn't do any harm otherwise)
+
+ self.set_xfrstate(conn, XfrinFirstData())
+ return True
+
+class XfrinFirstData(XfrinState):
+ def handle_rr(self, conn, rr):
+ '''Handle the first RR after initial SOA in an XFR session.
+
+ This state happens exactly once in an XFR session, where
+ we decide whether it's incremental update ("real" IXFR) or
+ non incremental update (AXFR or AXFR-style IXFR).
+ If we initiated IXFR and the transfer begins with two SOAs
+ (the serial of the second one being equal to our serial),
+ it's incremental; otherwise it's non incremental.
+
+ This method always returns False (unlike many other handle_rr()
+ methods) because this first RR must be examined again in the
+ determined update context.
+
+ Note that in the non incremental case the RR should normally be
+ something other than SOA, but it's still possible it's an SOA with a
+ different serial than ours. The only possible interpretation at
+ this point is that it's non incremental update that only consists
+ of the SOA RR. It will result in broken zone (for example, it
+ wouldn't even contain an apex NS) and should be rejected at post
+ XFR processing, but in terms of the XFR session processing we
+ accept it and move forward.
+
+ Note further that, in the half-broken SOA-only transfer case,
+ these two SOAs are supposed to be the same as stated in Section 2.2
+ of RFC 5936. We don't check that condition here, either; we'll
+ leave whether and how to deal with that situation to the end of
+ the processing of non incremental update. See also a related
+ discussion at the IETF dnsext wg:
+ http://www.ietf.org/mail-archive/web/dnsext/current/msg07908.html
+
+ '''
+ if conn._request_type == RRType.IXFR() and \
+ rr.get_type() == RRType.SOA() and \
+ conn._request_serial == get_soa_serial(rr.get_rdata()[0]):
+ logger.debug(DBG_XFRIN_TRACE, XFRIN_GOT_INCREMENTAL_RESP,
+ conn.zone_str())
+ self.set_xfrstate(conn, XfrinIXFRDeleteSOA())
+ else:
+ logger.debug(DBG_XFRIN_TRACE, XFRIN_GOT_NONINCREMENTAL_RESP,
+ conn.zone_str())
+ # We are now going to add RRs to the new zone. We need to create
+ # a Diff object. It will be used throughout the XFR session.
+ conn._diff = Diff(conn._datasrc_client, conn._zone_name, True)
+ self.set_xfrstate(conn, XfrinAXFR())
+ return False
+
+class XfrinIXFRDeleteSOA(XfrinState):
+ def handle_rr(self, conn, rr):
+ if rr.get_type() != RRType.SOA():
+ # this shouldn't happen; should this occur it means an internal
+ # bug.
+ raise XfrinException(rr.get_type().to_text() +
+ ' RR is given in IXFRDeleteSOA state')
+ # This is the beginning state of one difference sequence (changes
+ # for one SOA update). We need to create a new Diff object now.
+ conn._diff = Diff(conn._datasrc_client, conn._zone_name)
+ conn._diff.delete_data(rr)
+ self.set_xfrstate(conn, XfrinIXFRDelete())
+ return True
+
+class XfrinIXFRDelete(XfrinState):
+ def handle_rr(self, conn, rr):
+ if rr.get_type() == RRType.SOA():
+ # This is the only place where current_serial is set
+ conn._current_serial = get_soa_serial(rr.get_rdata()[0])
+ self.set_xfrstate(conn, XfrinIXFRAddSOA())
+ return False
+ conn._diff.delete_data(rr)
+ return True
+
+class XfrinIXFRAddSOA(XfrinState):
+ def handle_rr(self, conn, rr):
+ if rr.get_type() != RRType.SOA():
+ # this shouldn't happen; should this occur it means an internal
+ # bug.
+ raise XfrinException(rr.get_type().to_text() +
+ ' RR is given in IXFRAddSOA state')
+ conn._diff.add_data(rr)
+ self.set_xfrstate(conn, XfrinIXFRAdd())
+ return True
+
+class XfrinIXFRAdd(XfrinState):
+ def handle_rr(self, conn, rr):
+ if rr.get_type() == RRType.SOA():
+ soa_serial = get_soa_serial(rr.get_rdata()[0])
+ if soa_serial == conn._end_serial:
+ conn._diff.commit()
+ self.set_xfrstate(conn, XfrinIXFREnd())
+ return True
+ elif soa_serial != conn._current_serial:
+ raise XfrinProtocolError('IXFR out of sync: expected ' +
+ 'serial ' +
+ str(conn._current_serial) +
+ ', got ' + str(soa_serial))
+ else:
+ conn._diff.commit()
+ self.set_xfrstate(conn, XfrinIXFRDeleteSOA())
+ return False
+ conn._diff.add_data(rr)
+ return True
+
+class XfrinIXFREnd(XfrinState):
+ def handle_rr(self, conn, rr):
+ raise XfrinProtocolError('Extra data after the end of IXFR diffs: ' +
+ rr.to_text())
+
+ def finish_message(self, conn):
+ '''Final processing after processing an entire IXFR session.
+
+ There will be more actions here, but for now we simply return False,
+ indicating there will be no more message to receive.
+
+ '''
+ return False
+
+class XfrinAXFR(XfrinState):
+ def handle_rr(self, conn, rr):
+ """
+ Handle the RR by putting it into the zone.
+ """
+ conn._diff.add_data(rr)
+ if rr.get_type() == RRType.SOA():
+ # SOA means end. Don't commit it yet - we need to perform
+ # post-transfer checks
+ self.set_xfrstate(conn, XfrinAXFREnd())
+ # Yes, we've eaten this RR.
+ return True
+
+class XfrinAXFREnd(XfrinState):
+ def handle_rr(self, conn, rr):
+ raise XfrinProtocolError('Extra data after the end of AXFR: ' +
+ rr.to_text())
+
+ def finish_message(self, conn):
+ """
+ Final processing after processing an entire AXFR session.
+
+ In this process all the AXFR changes are committed to the
+ data source.
+
+ There might be more actions here, but for now we simply return False,
+ indicating there will be no more message to receive.
+
+ """
+ conn._diff.commit()
+ return False
+
class XfrinConnection(asyncore.dispatcher):
'''Do xfrin in this class. '''
def __init__(self,
- sock_map, zone_name, rrclass, db_file, shutdown_event,
- master_addrinfo, tsig_key = None, verbose = False,
- idle_timeout = 60):
- ''' idle_timeout: max idle time for read data from socket.
- db_file: specify the data source file.
- check_soa: when it's true, check soa first before sending xfr query
+ sock_map, zone_name, rrclass, datasrc_client, db_file,
+ shutdown_event, master_addrinfo, tsig_key = None,
+ verbose=False, idle_timeout=60):
+ '''Constructor of the XfrinConnection class.
+
+ idle_timeout: max idle time for read data from socket.
+ datasrc_client: the data source client object used for the XFR session.
+ This will eventually replace db_file completely.
+ db_file: specify the data source file (should soon be deprecated).
+
'''
asyncore.dispatcher.__init__(self, map=sock_map)
- self.create_socket(master_addrinfo[0], master_addrinfo[1])
+
+ # The XFR state. Conceptually this is purely private, so we emphasize
+ # the fact by the double underscore. Other classes are assumed to
+ # get access to this via get_xfrstate(), and only XfrinState classes
+ # are assumed to be allowed to modify it via __set_xfrstate().
+ self.__state = None
+
+ # Requested transfer type (RRType.AXFR or RRType.IXFR). The actual
+ # transfer type may differ due to IXFR->AXFR fallback:
+ self._request_type = None
+
+ # Zone parameters
self._zone_name = zone_name
- self._sock_map = sock_map
self._rrclass = rrclass
- self._db_file = db_file
+
+ # Data source handlers
+ self._db_file = db_file # temporary for sqlite3 specific code
+ self._datasrc_client = datasrc_client
+
+ self.create_socket(master_addrinfo[0], master_addrinfo[1])
+ self._sock_map = sock_map
self._soa_rr_count = 0
self._idle_timeout = idle_timeout
self.setblocking(1)
@@ -145,6 +481,16 @@ class XfrinConnection(asyncore.dispatcher):
def __create_tsig_ctx(self, key):
return TSIGContext(key)
+ def __set_xfrstate(self, new_state):
+ self.__state = new_state
+
+ def get_xfrstate(self):
+ return self.__state
+
+ def zone_str(self):
+ '''A convenient function for logging to include zone name and class'''
+ return self._zone_name.to_text() + '/' + str(self._rrclass)
+
def connect_to_master(self):
'''Connect to master in TCP.'''
@@ -156,16 +502,47 @@ class XfrinConnection(asyncore.dispatcher):
return False
def _create_query(self, query_type):
- '''Create dns query message. '''
+ '''Create an XFR-related query message.
+
+ query_type is either SOA, AXFR or IXFR. For type IXFR, it searches
+ the associated data source for the current SOA record to include
+ it in the query. If the corresponding zone or the SOA record
+ cannot be found, it raises an XfrinException exception. Note that
+ this may not necessarily be a broken configuration; for the first attempt
+ of transfer the secondary may not have any boot-strap zone
+ information, in which case IXFR simply won't work. The xfrin
+ should then fall back to AXFR. _request_serial is recorded for
+ later use.
+ '''
msg = Message(Message.RENDER)
query_id = random.randint(0, 0xFFFF)
self._query_id = query_id
msg.set_qid(query_id)
msg.set_opcode(Opcode.QUERY())
msg.set_rcode(Rcode.NOERROR())
- query_question = Question(Name(self._zone_name), self._rrclass, query_type)
- msg.add_question(query_question)
+ msg.add_question(Question(self._zone_name, self._rrclass, query_type))
+ if query_type == RRType.IXFR():
+ # get the zone finder. this must be SUCCESS (not even
+ # PARTIALMATCH) because we are specifying the zone origin name.
+ result, finder = self._datasrc_client.find_zone(self._zone_name)
+ if result != DataSourceClient.SUCCESS:
+ raise XfrinException('Zone not found in the given data ' +
+ 'source: ' + self.zone_str())
+ result, soa_rrset = finder.find(self._zone_name, RRType.SOA(),
+ None, ZoneFinder.FIND_DEFAULT)
+ if result != ZoneFinder.SUCCESS:
+ raise XfrinException('SOA RR not found in zone: ' +
+ self.zone_str())
+ # Especially for database-based zones, a working zone may be in
+ # a broken state where it has more than one SOA RR. We proactively
+ # check the condition and abort the xfr attempt if we identify it.
+ if soa_rrset.get_rdata_count() != 1:
+ raise XfrinException('Invalid number of SOA RRs for ' +
+ self.zone_str() + ': ' +
+ str(soa_rrset.get_rdata_count()))
+ msg.add_rrset(Message.SECTION_AUTHORITY, soa_rrset)
+ self._request_serial = get_soa_serial(soa_rrset.get_rdata()[0])
return msg
def _send_data(self, data):
@@ -256,39 +633,61 @@ class XfrinConnection(asyncore.dispatcher):
# now.
return XFRIN_OK
- def do_xfrin(self, check_soa, ixfr_first = False):
- '''Do xfr by sending xfr request and parsing response. '''
+ def do_xfrin(self, check_soa, request_type=RRType.AXFR()):
+ '''Do an xfr session by sending xfr request and parsing responses.'''
try:
ret = XFRIN_OK
+ self._request_type = request_type
+ # Right now RRType.[IA]XFR().to_text() is 'TYPExxx', so we need
+ # to hardcode here.
+ request_str = 'IXFR' if request_type == RRType.IXFR() else 'AXFR'
if check_soa:
- logstr = 'SOA check for \'%s\' ' % self._zone_name
ret = self._check_soa_serial()
if ret == XFRIN_OK:
- logger.info(XFRIN_AXFR_TRANSFER_STARTED, self._zone_name)
- self._send_query(RRType.AXFR())
- isc.datasrc.sqlite3_ds.load(self._db_file, self._zone_name,
- self._handle_xfrin_response)
-
- logger.info(XFRIN_AXFR_TRANSFER_SUCCESS, self._zone_name)
-
- except XfrinException as e:
- logger.error(XFRIN_AXFR_TRANSFER_FAILURE, self._zone_name, str(e))
+ logger.info(XFRIN_XFR_TRANSFER_STARTED, request_str,
+ self.zone_str())
+ if self._request_type == RRType.IXFR():
+ self._request_type = RRType.IXFR()
+ self._send_query(self._request_type)
+ self.__state = XfrinInitialSOA()
+ self._handle_xfrin_responses()
+ else:
+ self._send_query(self._request_type)
+ isc.datasrc.sqlite3_ds.load(self._db_file,
+ self._zone_name.to_text(),
+ self._handle_axfrin_response)
+ logger.info(XFRIN_XFR_TRANSFER_SUCCESS, request_str,
+ self.zone_str())
+
+ except (XfrinException, XfrinProtocolError) as e:
+ logger.error(XFRIN_XFR_TRANSFER_FAILURE, request_str,
+ self.zone_str(), str(e))
ret = XFRIN_FAIL
- #TODO, recover data source.
except isc.datasrc.sqlite3_ds.Sqlite3DSError as e:
- logger.error(XFRIN_AXFR_DATABASE_FAILURE, self._zone_name, str(e))
+ # Note: this is old code and used only for AXFR. This will be
+ # soon removed anyway, so we'll leave it.
+ logger.error(XFRIN_AXFR_DATABASE_FAILURE, self.zone_str(), str(e))
ret = XFRIN_FAIL
- except UserWarning as e:
- # XXX: this is an exception from our C++ library via the
- # Boost.Python binding. It would be better to have more more
- # specific exceptions, but at this moment this is the finest
- # granularity.
- logger.error(XFRIN_AXFR_INTERNAL_FAILURE, self._zone_name, str(e))
+ except Exception as e:
+ # Catching all possible exceptions like this is generally not a
+ # good practice, but handling an xfr session could result in
+ # so many types of exceptions, including ones from the DNS library
+ # or from the data source library. Eventually we'd introduce a
+ # hierarchy for exception classes from a base "ISC exception" and
+ # catch it here, but until then we need broadest coverage so that
+ # we won't miss anything.
+
+ logger.error(XFRIN_XFR_OTHER_FAILURE, request_str,
+ self.zone_str(), str(e))
ret = XFRIN_FAIL
finally:
- self.close()
+ # Make sure any remaining transaction in the diff is closed
+ # (if not yet - possible in case of xfr-level exception) as soon
+ # as possible
+ self._diff = None
+ self.close()
return ret
@@ -351,7 +750,32 @@ class XfrinConnection(asyncore.dispatcher):
yield (rrset_name, rrset_ttl, rrset_class, rrset_type,
rdata_text)
- def _handle_xfrin_response(self):
+ def _handle_xfrin_responses(self):
+ read_next_msg = True
+ while read_next_msg:
+ data_len = self._get_request_response(2)
+ msg_len = socket.htons(struct.unpack('H', data_len)[0])
+ recvdata = self._get_request_response(msg_len)
+ msg = Message(Message.PARSE)
+ msg.from_wire(recvdata, Message.PRESERVE_ORDER)
+
+ # TSIG related checks, including an unexpected signed response
+ self._check_response_tsig(msg, recvdata)
+
+ # Perform response status validation
+ self._check_response_status(msg)
+
+ for rr in msg.get_section(Message.SECTION_ANSWER):
+ rr_handled = False
+ while not rr_handled:
+ rr_handled = self.__state.handle_rr(self, rr)
+
+ read_next_msg = self.__state.finish_message(self)
+
+ if self._shutdown_event.is_set():
+ raise XfrinException('xfrin is forced to stop')
+
+ def _handle_axfrin_response(self):
'''Return a generator for the response to a zone transfer. '''
while True:
data_len = self._get_request_response(2)
@@ -394,15 +818,27 @@ class XfrinConnection(asyncore.dispatcher):
def process_xfrin(server, xfrin_recorder, zone_name, rrclass, db_file,
shutdown_event, master_addrinfo, check_soa, verbose,
- tsig_key):
+ tsig_key, request_type):
xfrin_recorder.increment(zone_name)
+
+ # Create a data source client used in this XFR session. Right now we
+ # still assume an sqlite3-based data source, and use both the old and new
+ # data source APIs. We also need to use a mock client for tests.
+ # For a temporary workaround to deal with these situations, we skip the
+ # creation when the given file is none (the test case). Eventually
+ # this code will be much cleaner.
+ datasrc_client = None
+ if db_file is not None:
+ datasrc_client = DataSourceClient(db_file)
+
+ # Create a TCP connection for the XFR session and perform the operation.
sock_map = {}
- conn = XfrinConnection(sock_map, zone_name, rrclass, db_file,
- shutdown_event, master_addrinfo,
+ conn = XfrinConnection(sock_map, zone_name, rrclass, datasrc_client,
+ db_file, shutdown_event, master_addrinfo,
tsig_key, verbose)
ret = XFRIN_FAIL
if conn.connect_to_master():
- ret = conn.do_xfrin(check_soa)
+ ret = conn.do_xfrin(check_soa, request_type)
# Publish the zone transfer result news, so zonemgr can reset the
# zone timer, and xfrout can notify the zone's slaves if the result
@@ -451,6 +887,7 @@ class ZoneInfo:
self.set_master_port(config_data.get('master_port'))
self.set_zone_class(config_data.get('class'))
self.set_tsig_key(config_data.get('tsig_key'))
+ self.set_ixfr_disabled(config_data.get('ixfr_disabled'))
def set_name(self, name_str):
"""Set the name for this zone given a name string.
@@ -525,6 +962,16 @@ class ZoneInfo:
errmsg = "bad TSIG key string: " + tsig_key_str
raise XfrinZoneInfoException(errmsg)
+ def set_ixfr_disabled(self, ixfr_disabled):
+ """Set ixfr_disabled. If set to False (the default), it will use
+ IXFR for incoming transfers. If set to True, it will use AXFR.
+ At this moment there is no automatic fallback"""
+ # don't care what type it is; if evaluates to true, set to True
+ if ixfr_disabled:
+ self.ixfr_disabled = True
+ else:
+ self.ixfr_disabled = False
+
def get_master_addr_info(self):
return (self.master_addr.family, socket.SOCK_STREAM,
(str(self.master_addr), self.master_port))
@@ -635,7 +1082,7 @@ class Xfrin:
rrclass,
self._get_db_file(),
master_addr,
- zone_info.tsig_key,
+ zone_info.tsig_key, RRType.AXFR(),
True)
answer = create_answer(ret[0], ret[1])
@@ -648,14 +1095,17 @@ class Xfrin:
rrclass)
zone_info = self._get_zone_info(zone_name, rrclass)
tsig_key = None
+ request_type = RRType.AXFR()
if zone_info:
tsig_key = zone_info.tsig_key
+ if not zone_info.ixfr_disabled:
+ request_type = RRType.IXFR()
db_file = args.get('db_file') or self._get_db_file()
ret = self.xfrin_start(zone_name,
rrclass,
db_file,
master_addr,
- tsig_key,
+ tsig_key, request_type,
(False if command == 'retransfer' else True))
answer = create_answer(ret[0], ret[1])
@@ -735,7 +1185,8 @@ class Xfrin:
news(command: zone_new_data_ready) to zone manager and xfrout.
if xfrin failed, just tell the bad news to zone manager, so that
it can reset the refresh timer for that zone. '''
- param = {'zone_name': zone_name, 'zone_class': zone_class.to_text()}
+ param = {'zone_name': zone_name.to_text(),
+ 'zone_class': zone_class.to_text()}
if xfr_result == XFRIN_OK:
msg = create_command(notify_out.ZONE_NEW_DATA_READY_CMD, param)
# catch the exception, in case msgq has been killed.
@@ -772,8 +1223,8 @@ class Xfrin:
while not self._shutdown_event.is_set():
self._cc_check_command()
- def xfrin_start(self, zone_name, rrclass, db_file, master_addrinfo, tsig_key,
- check_soa = True):
+ def xfrin_start(self, zone_name, rrclass, db_file, master_addrinfo,
+ tsig_key, request_type, check_soa=True):
if "pydnspp" not in sys.modules:
return (1, "xfrin failed, can't load dns message python library: 'pydnspp'")
@@ -787,13 +1238,13 @@ class Xfrin:
xfrin_thread = threading.Thread(target = process_xfrin,
args = (self,
self.recorder,
- zone_name.to_text(),
+ zone_name,
rrclass,
db_file,
self._shutdown_event,
master_addrinfo, check_soa,
self._verbose,
- tsig_key))
+ tsig_key, request_type))
xfrin_thread.start()
return (0, 'zone xfrin is started')
diff --git a/src/bin/xfrin/xfrin.spec b/src/bin/xfrin/xfrin.spec
index a3e62ce..bc93720 100644
--- a/src/bin/xfrin/xfrin.spec
+++ b/src/bin/xfrin/xfrin.spec
@@ -43,6 +43,11 @@
{ "item_name": "tsig_key",
"item_type": "string",
"item_optional": true
+ },
+ { "item_name": "ixfr_disabled",
+ "item_type": "boolean",
+ "item_optional": false,
+ "item_default": false
}
]
}
diff --git a/src/bin/xfrin/xfrin_messages.mes b/src/bin/xfrin/xfrin_messages.mes
index 80a0be3..5e3e347 100644
--- a/src/bin/xfrin/xfrin_messages.mes
+++ b/src/bin/xfrin/xfrin_messages.mes
@@ -15,25 +15,26 @@
# No namespace declaration - these constants go in the global namespace
# of the xfrin messages python module.
-% XFRIN_AXFR_INTERNAL_FAILURE AXFR transfer of zone %1 failed: %2
-The AXFR transfer for the given zone has failed due to an internal
-problem in the bind10 python wrapper library.
-The error is shown in the log message.
+% XFRIN_XFR_OTHER_FAILURE %1 transfer of zone %2 failed: %3
+The XFR transfer for the given zone has failed due to a problem outside
+of the xfrin module. Possible reasons are a broken DNS message or failure
+in database connection. The error is shown in the log message.
% XFRIN_AXFR_DATABASE_FAILURE AXFR transfer of zone %1 failed: %2
The AXFR transfer for the given zone has failed due to a database problem.
-The error is shown in the log message.
+The error is shown in the log message. Note: due to the code structure
+this can only happen for AXFR.
-% XFRIN_AXFR_TRANSFER_FAILURE AXFR transfer of zone %1 failed: %2
-The AXFR transfer for the given zone has failed due to a protocol error.
+% XFRIN_XFR_TRANSFER_FAILURE %1 transfer of zone %2 failed: %3
+The XFR transfer for the given zone has failed due to a protocol error.
The error is shown in the log message.
-% XFRIN_AXFR_TRANSFER_STARTED AXFR transfer of zone %1 started
+% XFRIN_XFR_TRANSFER_STARTED %1 transfer of zone %2 started
A connection to the master server has been made, the serial value in
the SOA record has been checked, and a zone transfer has been started.
-% XFRIN_AXFR_TRANSFER_SUCCESS AXFR transfer of zone %1 succeeded
-The AXFR transfer of the given zone was successfully completed.
+% XFRIN_XFR_TRANSFER_SUCCESS %1 transfer of zone %2 succeeded
+The XFR transfer of the given zone was successfully completed.
% XFRIN_BAD_MASTER_ADDR_FORMAT bad format for master address: %1
The given master address is not a valid IP address.
@@ -89,3 +90,12 @@ daemon will now shut down.
% XFRIN_UNKNOWN_ERROR unknown error: %1
An uncaught exception was raised while running the xfrin daemon. The
exception message is printed in the log message.
+
+% XFRIN_GOT_INCREMENTAL_RESP got incremental response for %1
+In an attempt of IXFR processing, the beginning SOA of the first difference
+(following the initial SOA that specified the final SOA for all the
+differences) was found. This means a connection for xfrin tried IXFR
+and really got a response for incremental updates.
+
+% XFRIN_GOT_NONINCREMENTAL_RESP got nonincremental response for %1
+In an attempt of IXFR processing, the response from the server did not begin with an incremental difference sequence (for example, the first data was not an SOA whose serial matched the one in the query), so the response is interpreted as a nonincremental (AXFR-style) transfer of the entire zone.
diff --git a/src/bin/xfrout/Makefile.am b/src/bin/xfrout/Makefile.am
index c5492ad..6100e64 100644
--- a/src/bin/xfrout/Makefile.am
+++ b/src/bin/xfrout/Makefile.am
@@ -6,9 +6,13 @@ pkglibexec_SCRIPTS = b10-xfrout
b10_xfroutdir = $(pkgdatadir)
b10_xfrout_DATA = xfrout.spec
-pyexec_DATA = xfrout_messages.py
-CLEANFILES= b10-xfrout xfrout.pyc xfrout.spec xfrout_messages.py xfrout_messages.pyc
+nodist_pylogmessage_PYTHON = $(PYTHON_LOGMSGPKG_DIR)/work/xfrout_messages.py
+pylogmessagedir = $(pyexecdir)/isc/log_messages/
+
+CLEANFILES = b10-xfrout xfrout.pyc xfrout.spec
+CLEANFILES += $(PYTHON_LOGMSGPKG_DIR)/work/xfrout_messages.py
+CLEANFILES += $(PYTHON_LOGMSGPKG_DIR)/work/xfrout_messages.pyc
man_MANS = b10-xfrout.8
EXTRA_DIST = $(man_MANS) b10-xfrout.xml xfrout_messages.mes
@@ -21,14 +25,15 @@ b10-xfrout.8: b10-xfrout.xml
endif
# Define rule to build logging source files from message file
-xfrout_messages.py: xfrout_messages.mes
- $(top_builddir)/src/lib/log/compiler/message -p $(top_srcdir)/src/bin/xfrout/xfrout_messages.mes
+$(PYTHON_LOGMSGPKG_DIR)/work/xfrout_messages.py : xfrout_messages.mes
+ $(top_builddir)/src/lib/log/compiler/message \
+ -d $(PYTHON_LOGMSGPKG_DIR)/work -p $(srcdir)/xfrout_messages.mes
xfrout.spec: xfrout.spec.pre
$(SED) -e "s|@@LOCALSTATEDIR@@|$(localstatedir)|" xfrout.spec.pre >$@
# this is done here since configure.ac AC_OUTPUT doesn't expand exec_prefix
-b10-xfrout: xfrout.py xfrout_messages.py
+b10-xfrout: xfrout.py $(PYTHON_LOGMSGPKG_DIR)/work/xfrout_messages.py
$(SED) -e "s|@@PYTHONPATH@@|@pyexecdir@|" \
-e "s|@@LOCALSTATEDIR@@|$(localstatedir)|" xfrout.py >$@
chmod a+x $@
diff --git a/src/bin/xfrout/b10-xfrout.xml b/src/bin/xfrout/b10-xfrout.xml
index ad71fe2..9889b80 100644
--- a/src/bin/xfrout/b10-xfrout.xml
+++ b/src/bin/xfrout/b10-xfrout.xml
@@ -134,6 +134,14 @@
data storage types.
</simpara></note>
+
+<!--
+
+tsig_key_ring list of
+tsig_key string
+
+-->
+
<!-- TODO: formating -->
<para>
The configuration commands are:
diff --git a/src/bin/xfrout/tests/Makefile.am b/src/bin/xfrout/tests/Makefile.am
index 99f4843..ace8fc9 100644
--- a/src/bin/xfrout/tests/Makefile.am
+++ b/src/bin/xfrout/tests/Makefile.am
@@ -1,15 +1,17 @@
PYCOVERAGE_RUN=@PYCOVERAGE_RUN@
PYTESTS = xfrout_test.py
-EXTRA_DIST = $(PYTESTS)
+noinst_SCRIPTS = $(PYTESTS)
# If necessary (rare cases), explicitly specify paths to dynamic libraries
# required by loadable python modules.
LIBRARY_PATH_PLACEHOLDER =
if SET_ENV_LIBRARY_PATH
-LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/acl/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/util/io/.libs:$$$(ENV_LIBRARY_PATH)
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/util/io/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$(abs_top_builddir)/src/lib/acl/.libs:$$$(ENV_LIBRARY_PATH)
endif
# test using command-line arguments, so use check-local target instead of TESTS
+# We set B10_FROM_BUILD below, so that the test can refer to the in-source
+# spec file.
check-local:
if ENABLE_PYTHON_COVERAGE
touch $(abs_top_srcdir)/.coverage
@@ -18,7 +20,9 @@ if ENABLE_PYTHON_COVERAGE
endif
for pytest in $(PYTESTS) ; do \
echo Running test: $$pytest ; \
+ chmod +x $(abs_builddir)/$$pytest ; \
+ B10_FROM_BUILD=$(abs_top_builddir) \
$(LIBRARY_PATH_PLACEHOLDER) \
- env PYTHONPATH=$(abs_top_builddir)/src/bin/xfrout:$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/util/io/.libs \
+ PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_builddir)/src/bin/xfrout:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/util/io/.libs \
$(PYCOVERAGE_RUN) $(abs_builddir)/$$pytest || exit ; \
done
diff --git a/src/bin/xfrout/tests/xfrout_test.py.in b/src/bin/xfrout/tests/xfrout_test.py.in
index 62c7708..85979a0 100644
--- a/src/bin/xfrout/tests/xfrout_test.py.in
+++ b/src/bin/xfrout/tests/xfrout_test.py.in
@@ -20,6 +20,7 @@ import unittest
import os
from isc.testutils.tsigctx_mock import MockTSIGContext
from isc.cc.session import *
+import isc.config
from pydnspp import *
from xfrout import *
import xfrout
@@ -101,20 +102,24 @@ class TestXfroutSession(unittest.TestCase):
def message_has_tsig(self, msg):
return msg.get_tsig_record() is not None
- def create_request_data_with_tsig(self):
+ def create_request_data(self, with_tsig=False):
msg = Message(Message.RENDER)
query_id = 0x1035
msg.set_qid(query_id)
msg.set_opcode(Opcode.QUERY())
msg.set_rcode(Rcode.NOERROR())
- query_question = Question(Name("example.com."), RRClass.IN(), RRType.AXFR())
+ query_question = Question(Name("example.com"), RRClass.IN(),
+ RRType.AXFR())
msg.add_question(query_question)
renderer = MessageRenderer()
- tsig_ctx = MockTSIGContext(TSIG_KEY)
- msg.to_wire(renderer, tsig_ctx)
- reply_data = renderer.get_data()
- return reply_data
+ if with_tsig:
+ tsig_ctx = MockTSIGContext(TSIG_KEY)
+ msg.to_wire(renderer, tsig_ctx)
+ else:
+ msg.to_wire(renderer)
+ request_data = renderer.get_data()
+ return request_data
def setUp(self):
self.sock = MySocket(socket.AF_INET,socket.SOCK_STREAM)
@@ -122,8 +127,9 @@ class TestXfroutSession(unittest.TestCase):
TSIGKeyRing(), ('127.0.0.1', 12345),
# When not testing ACLs, simply accept
isc.acl.dns.REQUEST_LOADER.load(
- [{"action": "ACCEPT"}]))
- self.mdata = bytes(b'\xd6=\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x07example\x03com\x00\x00\xfc\x00\x01')
+ [{"action": "ACCEPT"}]),
+ {})
+ self.mdata = self.create_request_data(False)
self.soa_record = (4, 3, 'example.com.', 'com.example.', 3600, 'SOA', None, 'master.example.com. admin.example.com. 1234 3600 1800 2419200 7200')
def test_parse_query_message(self):
@@ -131,7 +137,7 @@ class TestXfroutSession(unittest.TestCase):
self.assertEqual(get_rcode.to_text(), "NOERROR")
# tsig signed query message
- request_data = self.create_request_data_with_tsig()
+ request_data = self.create_request_data(True)
# BADKEY
[rcode, msg] = self.xfrsess._parse_query_message(request_data)
self.assertEqual(rcode.to_text(), "NOTAUTH")
@@ -143,8 +149,9 @@ class TestXfroutSession(unittest.TestCase):
self.assertEqual(rcode.to_text(), "NOERROR")
self.assertTrue(self.xfrsess._tsig_ctx is not None)
+ def check_transfer_acl(self, acl_setter):
# ACL checks, put some ACL inside
- self.xfrsess._acl = isc.acl.dns.REQUEST_LOADER.load([
+ acl_setter(isc.acl.dns.REQUEST_LOADER.load([
{
"from": "127.0.0.1",
"action": "ACCEPT"
@@ -153,7 +160,7 @@ class TestXfroutSession(unittest.TestCase):
"from": "192.0.2.1",
"action": "DROP"
}
- ])
+ ]))
# Localhost (the default in this test) is accepted
rcode, msg = self.xfrsess._parse_query_message(self.mdata)
self.assertEqual(rcode.to_text(), "NOERROR")
@@ -165,6 +172,10 @@ class TestXfroutSession(unittest.TestCase):
self.xfrsess._remote = ('192.0.2.2', 12345)
rcode, msg = self.xfrsess._parse_query_message(self.mdata)
self.assertEqual(rcode.to_text(), "REFUSED")
+
+ # TSIG signed request
+ request_data = self.create_request_data(True)
+
# If the TSIG check fails, it should not check ACL
# (If it checked ACL as well, it would just drop the request)
self.xfrsess._remote = ('192.0.2.1', 12345)
@@ -174,36 +185,36 @@ class TestXfroutSession(unittest.TestCase):
self.assertTrue(self.xfrsess._tsig_ctx is not None)
# ACL using TSIG: successful case
- self.xfrsess._acl = isc.acl.dns.REQUEST_LOADER.load([
+ acl_setter(isc.acl.dns.REQUEST_LOADER.load([
{"key": "example.com", "action": "ACCEPT"}, {"action": "REJECT"}
- ])
+ ]))
self.assertEqual(TSIGKeyRing.SUCCESS,
self.xfrsess._tsig_key_ring.add(TSIG_KEY))
[rcode, msg] = self.xfrsess._parse_query_message(request_data)
self.assertEqual(rcode.to_text(), "NOERROR")
# ACL using TSIG: key name doesn't match; should be rejected
- self.xfrsess._acl = isc.acl.dns.REQUEST_LOADER.load([
+ acl_setter(isc.acl.dns.REQUEST_LOADER.load([
{"key": "example.org", "action": "ACCEPT"}, {"action": "REJECT"}
- ])
+ ]))
[rcode, msg] = self.xfrsess._parse_query_message(request_data)
self.assertEqual(rcode.to_text(), "REFUSED")
# ACL using TSIG: no TSIG; should be rejected
- self.xfrsess._acl = isc.acl.dns.REQUEST_LOADER.load([
+ acl_setter(isc.acl.dns.REQUEST_LOADER.load([
{"key": "example.org", "action": "ACCEPT"}, {"action": "REJECT"}
- ])
+ ]))
[rcode, msg] = self.xfrsess._parse_query_message(self.mdata)
self.assertEqual(rcode.to_text(), "REFUSED")
#
# ACL using IP + TSIG: both should match
#
- self.xfrsess._acl = isc.acl.dns.REQUEST_LOADER.load([
+ acl_setter(isc.acl.dns.REQUEST_LOADER.load([
{"ALL": [{"key": "example.com"}, {"from": "192.0.2.1"}],
"action": "ACCEPT"},
{"action": "REJECT"}
- ])
+ ]))
# both matches
self.xfrsess._remote = ('192.0.2.1', 12345)
[rcode, msg] = self.xfrsess._parse_query_message(request_data)
@@ -221,6 +232,63 @@ class TestXfroutSession(unittest.TestCase):
[rcode, msg] = self.xfrsess._parse_query_message(self.mdata)
self.assertEqual(rcode.to_text(), "REFUSED")
+ def test_transfer_acl(self):
+ # ACL checks only with the default ACL
+ def acl_setter(acl):
+ self.xfrsess._acl = acl
+ self.check_transfer_acl(acl_setter)
+
+ def test_transfer_zoneacl(self):
+ # ACL check with a per zone ACL + default ACL. The per zone ACL
+ # should match the queryied zone, so it should be used.
+ def acl_setter(acl):
+ zone_key = ('IN', 'example.com.')
+ self.xfrsess._zone_config[zone_key] = {}
+ self.xfrsess._zone_config[zone_key]['transfer_acl'] = acl
+ self.xfrsess._acl = isc.acl.dns.REQUEST_LOADER.load([
+ {"from": "127.0.0.1", "action": "DROP"}])
+ self.check_transfer_acl(acl_setter)
+
+ def test_transfer_zoneacl_nomatch(self):
+ # similar to the previous one, but the per zone doesn't match the
+ # query. The default should be used.
+ def acl_setter(acl):
+ zone_key = ('IN', 'example.org.')
+ self.xfrsess._zone_config[zone_key] = {}
+ self.xfrsess._zone_config[zone_key]['transfer_acl'] = \
+ isc.acl.dns.REQUEST_LOADER.load([
+ {"from": "127.0.0.1", "action": "DROP"}])
+ self.xfrsess._acl = acl
+ self.check_transfer_acl(acl_setter)
+
+ def test_get_transfer_acl(self):
+ # set the default ACL. If there's no specific zone ACL, this one
+ # should be used.
+ self.xfrsess._acl = isc.acl.dns.REQUEST_LOADER.load([
+ {"from": "127.0.0.1", "action": "ACCEPT"}])
+ acl = self.xfrsess._get_transfer_acl(Name('example.com'), RRClass.IN())
+ self.assertEqual(acl, self.xfrsess._acl)
+
+ # install a per zone config with transfer ACL for example.com. Then
+ # that ACL will be used for example.com; for others the default ACL
+ # will still be used.
+ com_acl = isc.acl.dns.REQUEST_LOADER.load([
+ {"from": "127.0.0.1", "action": "REJECT"}])
+ self.xfrsess._zone_config[('IN', 'example.com.')] = {}
+ self.xfrsess._zone_config[('IN', 'example.com.')]['transfer_acl'] = \
+ com_acl
+ self.assertEqual(com_acl,
+ self.xfrsess._get_transfer_acl(Name('example.com'),
+ RRClass.IN()))
+ self.assertEqual(self.xfrsess._acl,
+ self.xfrsess._get_transfer_acl(Name('example.org'),
+ RRClass.IN()))
+
+ # Name matching should be case insensitive.
+ self.assertEqual(com_acl,
+ self.xfrsess._get_transfer_acl(Name('EXAMPLE.COM'),
+ RRClass.IN()))
+
def test_get_query_zone_name(self):
msg = self.getmsg()
self.assertEqual(self.xfrsess._get_query_zone_name(msg), "example.com.")
@@ -572,9 +640,11 @@ class TestXfroutSession(unittest.TestCase):
# and it should not have sent anything else
self.assertEqual(0, len(self.sock.sendqueue))
-class MyCCSession():
+class MyCCSession(isc.config.ConfigData):
def __init__(self):
- pass
+ module_spec = isc.config.module_spec_from_file(
+ xfrout.SPECFILE_LOCATION)
+ ConfigData.__init__(self, module_spec)
def get_remote_config_value(self, module_name, identifier):
if module_name == "Auth" and identifier == "database_file":
@@ -586,9 +656,9 @@ class MyCCSession():
class MyUnixSockServer(UnixSockServer):
def __init__(self):
self._shutdown_event = threading.Event()
- self._max_transfers_out = 10
- self._cc = MyCCSession()
self._common_init()
+ self._cc = MyCCSession()
+ self.update_config_data(self._cc.get_full_config())
class TestUnixSockServer(unittest.TestCase):
def setUp(self):
@@ -636,17 +706,17 @@ class TestUnixSockServer(unittest.TestCase):
socket.AI_NUMERICHOST)[0][4])
self.assertEqual(isc.acl.acl.ACCEPT, self.unix._acl.execute(context))
- def check_loaded_ACL(self):
+ def check_loaded_ACL(self, acl):
context = isc.acl.dns.RequestContext(socket.getaddrinfo("127.0.0.1",
1234, 0, socket.SOCK_DGRAM,
socket.IPPROTO_UDP,
socket.AI_NUMERICHOST)[0][4])
- self.assertEqual(isc.acl.acl.ACCEPT, self.unix._acl.execute(context))
+ self.assertEqual(isc.acl.acl.ACCEPT, acl.execute(context))
context = isc.acl.dns.RequestContext(socket.getaddrinfo("192.0.2.1",
1234, 0, socket.SOCK_DGRAM,
socket.IPPROTO_UDP,
socket.AI_NUMERICHOST)[0][4])
- self.assertEqual(isc.acl.acl.REJECT, self.unix._acl.execute(context))
+ self.assertEqual(isc.acl.acl.REJECT, acl.execute(context))
def test_update_config_data(self):
self.check_default_ACL()
@@ -671,14 +741,79 @@ class TestUnixSockServer(unittest.TestCase):
self.assertEqual(self.unix.tsig_key_ring.size(), 0)
# Load the ACL
- self.unix.update_config_data({'query_acl': [{'from': '127.0.0.1',
+ self.unix.update_config_data({'transfer_acl': [{'from': '127.0.0.1',
'action': 'ACCEPT'}]})
- self.check_loaded_ACL()
+ self.check_loaded_ACL(self.unix._acl)
# Pass a wrong data there and check it does not replace the old one
- self.assertRaises(isc.acl.acl.LoaderError,
+ self.assertRaises(XfroutConfigError,
+ self.unix.update_config_data,
+ {'transfer_acl': ['Something bad']})
+ self.check_loaded_ACL(self.unix._acl)
+
+ def test_zone_config_data(self):
+ # By default, there's no specific zone config
+ self.assertEqual({}, self.unix._zone_config)
+
+ # Adding config for a specific zone. The config is empty unless
+ # explicitly specified.
+ self.unix.update_config_data({'zone_config':
+ [{'origin': 'example.com',
+ 'class': 'IN'}]})
+ self.assertEqual({}, self.unix._zone_config[('IN', 'example.com.')])
+
+ # zone class can be omitted
+ self.unix.update_config_data({'zone_config':
+ [{'origin': 'example.com'}]})
+ self.assertEqual({}, self.unix._zone_config[('IN', 'example.com.')])
+
+ # zone class, name are stored in the "normalized" form. class
+ # strings are upper cased, names are down cased.
+ self.unix.update_config_data({'zone_config':
+ [{'origin': 'EXAMPLE.com'}]})
+ self.assertEqual({}, self.unix._zone_config[('IN', 'example.com.')])
+
+ # invalid zone class, name will result in exceptions
+ self.assertRaises(EmptyLabel,
+ self.unix.update_config_data,
+ {'zone_config': [{'origin': 'bad..example'}]})
+ self.assertRaises(InvalidRRClass,
+ self.unix.update_config_data,
+ {'zone_config': [{'origin': 'example.com',
+ 'class': 'badclass'}]})
+
+ # Configuring a couple of more zones
+ self.unix.update_config_data({'zone_config':
+ [{'origin': 'example.com'},
+ {'origin': 'example.com',
+ 'class': 'CH'},
+ {'origin': 'example.org'}]})
+ self.assertEqual({}, self.unix._zone_config[('IN', 'example.com.')])
+ self.assertEqual({}, self.unix._zone_config[('CH', 'example.com.')])
+ self.assertEqual({}, self.unix._zone_config[('IN', 'example.org.')])
+
+ # Duplicate data: should be rejected with an exception
+ self.assertRaises(XfroutConfigError,
+ self.unix.update_config_data,
+ {'zone_config': [{'origin': 'example.com'},
+ {'origin': 'example.org'},
+ {'origin': 'example.com'}]})
+
+ def test_zone_config_data_with_acl(self):
+ # Similar to the previous test, but with transfer_acl config
+ self.unix.update_config_data({'zone_config':
+ [{'origin': 'example.com',
+ 'transfer_acl':
+ [{'from': '127.0.0.1',
+ 'action': 'ACCEPT'}]}]})
+ acl = self.unix._zone_config[('IN', 'example.com.')]['transfer_acl']
+ self.check_loaded_ACL(acl)
+
+ # invalid ACL syntax will be rejected with exception
+ self.assertRaises(XfroutConfigError,
self.unix.update_config_data,
- {'query_acl': ['Something bad']})
- self.check_loaded_ACL()
+ {'zone_config': [{'origin': 'example.com',
+ 'transfer_acl':
+ [{'action': 'BADACTION'}]}]})
def test_get_db_file(self):
self.assertEqual(self.unix.get_db_file(), "initdb.file")
diff --git a/src/bin/xfrout/xfrout.py.in b/src/bin/xfrout/xfrout.py.in
index fe42c54..8049e29 100755
--- a/src/bin/xfrout/xfrout.py.in
+++ b/src/bin/xfrout/xfrout.py.in
@@ -35,7 +35,7 @@ import errno
from optparse import OptionParser, OptionValueError
from isc.util import socketserver_mixin
-from xfrout_messages import *
+from isc.log_messages.xfrout_messages import *
isc.log.init("b10-xfrout")
logger = isc.log.Logger("xfrout")
@@ -48,11 +48,23 @@ except ImportError as e:
# must keep running, so we warn about it and move forward.
log.error(XFROUT_IMPORT, str(e))
-from isc.acl.acl import ACCEPT, REJECT, DROP
+from isc.acl.acl import ACCEPT, REJECT, DROP, LoaderError
from isc.acl.dns import REQUEST_LOADER
isc.util.process.rename()
+class XfroutConfigError(Exception):
+ """An exception indicating an error in updating xfrout configuration.
+
+ This exception is raised when the xfrout process encounters an error in
+ handling configuration updates. Not all syntax errors can be caught
+ at the module-CC layer, so xfrout needs to (explicitly or implicitly)
+ validate the given configuration data itself. When it finds an error
+ it raises this exception (either directly or by converting an exception
+ from other modules) as a unified error in configuration.
+ """
+ pass
+
def init_paths():
global SPECFILE_PATH
global AUTH_SPECFILE_PATH
@@ -79,14 +91,12 @@ init_paths()
SPECFILE_LOCATION = SPECFILE_PATH + "/xfrout.spec"
AUTH_SPECFILE_LOCATION = AUTH_SPECFILE_PATH + os.sep + "auth.spec"
-MAX_TRANSFERS_OUT = 10
VERBOSE_MODE = False
# tsig sign every N axfr packets.
TSIG_SIGN_EVERY_NTH = 96
XFROUT_MAX_MESSAGE_SIZE = 65535
-
def get_rrset_len(rrset):
"""Returns the wire length of the given RRset"""
bytes = bytearray()
@@ -96,7 +106,7 @@ def get_rrset_len(rrset):
class XfroutSession():
def __init__(self, sock_fd, request_data, server, tsig_key_ring, remote,
- acl):
+ default_acl, zone_config):
self._sock_fd = sock_fd
self._request_data = request_data
self._server = server
@@ -104,7 +114,8 @@ class XfroutSession():
self._tsig_ctx = None
self._tsig_len = 0
self._remote = remote
- self._acl = acl
+ self._acl = default_acl
+ self._zone_config = zone_config
self.handle()
def create_tsig_ctx(self, tsig_record, tsig_key_ring):
@@ -140,34 +151,50 @@ class XfroutSession():
try:
msg = Message(Message.PARSE)
Message.from_wire(msg, mdata)
-
- # TSIG related checks
- rcode = self._check_request_tsig(msg, mdata)
-
- if rcode == Rcode.NOERROR():
- # ACL checks
- acl_result = self._acl.execute(
- isc.acl.dns.RequestContext(self._remote,
- msg.get_tsig_record()))
- if acl_result == DROP:
- logger.info(XFROUT_QUERY_DROPPED,
- self._get_query_zone_name(msg),
- self._get_query_zone_class(msg),
- self._remote[0], self._remote[1])
- return None, None
- elif acl_result == REJECT:
- logger.info(XFROUT_QUERY_REJECTED,
- self._get_query_zone_name(msg),
- self._get_query_zone_class(msg),
- self._remote[0], self._remote[1])
- return Rcode.REFUSED(), msg
-
- except Exception as err:
+ except Exception as err: # Exception is too broad
logger.error(XFROUT_PARSE_QUERY_ERROR, err)
return Rcode.FORMERR(), None
+ # TSIG related checks
+ rcode = self._check_request_tsig(msg, mdata)
+
+ if rcode == Rcode.NOERROR():
+ # ACL checks
+ zone_name = msg.get_question()[0].get_name()
+ zone_class = msg.get_question()[0].get_class()
+ acl = self._get_transfer_acl(zone_name, zone_class)
+ acl_result = acl.execute(
+ isc.acl.dns.RequestContext(self._remote,
+ msg.get_tsig_record()))
+ if acl_result == DROP:
+ logger.info(XFROUT_QUERY_DROPPED, zone_name, zone_class,
+ self._remote[0], self._remote[1])
+ return None, None
+ elif acl_result == REJECT:
+ logger.info(XFROUT_QUERY_REJECTED, zone_name, zone_class,
+ self._remote[0], self._remote[1])
+ return Rcode.REFUSED(), msg
+
return rcode, msg
+ def _get_transfer_acl(self, zone_name, zone_class):
+ '''Return the ACL that should be applied for a given zone.
+
+ The zone is identified by a tuple of name and RR class.
+ If a per zone configuration for the zone exists and contains
+ transfer_acl, that ACL will be used; otherwise, the default
+ ACL will be used.
+
+ '''
+ # Internally zone names are managed in lower cased label characters,
+ # so we first need to convert the name.
+ zone_name_lower = Name(zone_name.to_text(), True)
+ config_key = (zone_class.to_text(), zone_name_lower.to_text())
+ if config_key in self._zone_config and \
+ 'transfer_acl' in self._zone_config[config_key]:
+ return self._zone_config[config_key]['transfer_acl']
+ return self._acl
+
def _get_query_zone_name(self, msg):
question = msg.get_question()[0]
return question.get_name().to_text()
@@ -384,10 +411,12 @@ class XfroutSession():
self._send_message_with_last_soa(msg, sock_fd, rrset_soa, message_upper_len,
count_since_last_tsig_sign)
-class UnixSockServer(socketserver_mixin.NoPollMixIn, ThreadingUnixStreamServer):
+class UnixSockServer(socketserver_mixin.NoPollMixIn,
+ ThreadingUnixStreamServer):
'''The unix domain socket server which accept xfr query sent from auth server.'''
- def __init__(self, sock_file, handle_class, shutdown_event, config_data, cc):
+ def __init__(self, sock_file, handle_class, shutdown_event, config_data,
+ cc):
self._remove_unused_sock_file(sock_file)
self._sock_file = sock_file
socketserver_mixin.NoPollMixIn.__init__(self)
@@ -395,16 +424,15 @@ class UnixSockServer(socketserver_mixin.NoPollMixIn, ThreadingUnixStreamServer):
self._shutdown_event = shutdown_event
self._write_sock, self._read_sock = socket.socketpair()
self._common_init()
- self.update_config_data(config_data)
self._cc = cc
+ self.update_config_data(config_data)
def _common_init(self):
+ '''Initialization shared with the mock server class used for tests'''
self._lock = threading.Lock()
self._transfers_counter = 0
- # This default value will probably get overwritten by the (same)
- # default value from the spec file. This is here just to make
- # sure and to make the default value in tests consistent.
- self._acl = REQUEST_LOADER.load('[{"action": "ACCEPT"}]')
+ self._zone_config = {}
+ self._acl = None # this will be initialized in update_config_data()
def _receive_query_message(self, sock):
''' receive request message from sock'''
@@ -482,7 +510,7 @@ class UnixSockServer(socketserver_mixin.NoPollMixIn, ThreadingUnixStreamServer):
if not request_data:
return
- t = threading.Thread(target = self.finish_request,
+ t = threading.Thread(target=self.finish_request,
args = (sock_fd, request_data))
if self.daemon_threads:
t.daemon = True
@@ -506,10 +534,17 @@ class UnixSockServer(socketserver_mixin.NoPollMixIn, ThreadingUnixStreamServer):
return sock.getpeername()
def finish_request(self, sock_fd, request_data):
- '''Finish one request by instantiating RequestHandlerClass.'''
+ '''Finish one request by instantiating RequestHandlerClass.
+
+ This method creates a XfroutSession object.
+ '''
+ self._lock.acquire()
+ acl = self._acl
+ zone_config = self._zone_config
+ self._lock.release()
self.RequestHandlerClass(sock_fd, request_data, self,
self.tsig_key_ring,
- self._guess_remote(sock_fd), self._acl)
+ self._guess_remote(sock_fd), acl, zone_config)
def _remove_unused_sock_file(self, sock_file):
'''Try to remove the socket file. If the file is being used
@@ -551,16 +586,65 @@ class UnixSockServer(socketserver_mixin.NoPollMixIn, ThreadingUnixStreamServer):
pass
def update_config_data(self, new_config):
- '''Apply the new config setting of xfrout module. '''
- logger.info(XFROUT_NEW_CONFIG)
- if 'query_acl' in new_config:
- self._acl = REQUEST_LOADER.load(new_config['query_acl'])
+ '''Apply the new config setting of xfrout module.
+
+ '''
self._lock.acquire()
- self._max_transfers_out = new_config.get('transfers_out')
- self.set_tsig_key_ring(new_config.get('tsig_key_ring'))
+ try:
+ logger.info(XFROUT_NEW_CONFIG)
+ new_acl = self._acl
+ if 'transfer_acl' in new_config:
+ try:
+ new_acl = REQUEST_LOADER.load(new_config['transfer_acl'])
+ except LoaderError as e:
+ raise XfroutConfigError('Failed to parse transfer_acl: ' +
+ str(e))
+
+ new_zone_config = self._zone_config
+ zconfig_data = new_config.get('zone_config')
+ if zconfig_data is not None:
+ new_zone_config = self.__create_zone_config(zconfig_data)
+
+ self._acl = new_acl
+ self._zone_config = new_zone_config
+ self._max_transfers_out = new_config.get('transfers_out')
+ self.set_tsig_key_ring(new_config.get('tsig_key_ring'))
+ except Exception as e:
+ self._lock.release()
+ raise e
self._lock.release()
logger.info(XFROUT_NEW_CONFIG_DONE)
+ def __create_zone_config(self, zone_config_list):
+ new_config = {}
+ for zconf in zone_config_list:
+ # convert the class, origin (name) pair. First build pydnspp
+ # object to reject invalid input.
+ zclass_str = zconf.get('class')
+ if zclass_str is None:
+ #zclass_str = 'IN' # temporary
+ zclass_str = self._cc.get_default_value('zone_config/class')
+ zclass = RRClass(zclass_str)
+ zorigin = Name(zconf['origin'], True)
+ config_key = (zclass.to_text(), zorigin.to_text())
+
+ # reject duplicate config
+ if config_key in new_config:
+ raise XfroutConfigError('Duplicate zone_config for ' +
+ str(zorigin) + '/' + str(zclass))
+
+ # create a new config entry, build any given (and known) config
+ new_config[config_key] = {}
+ if 'transfer_acl' in zconf:
+ try:
+ new_config[config_key]['transfer_acl'] = \
+ REQUEST_LOADER.load(zconf['transfer_acl'])
+ except LoaderError as e:
+ raise XfroutConfigError('Failed to parse transfer_acl ' +
+ 'for ' + zorigin.to_text() + '/' +
+ zclass_str + ': ' + str(e))
+ return new_config
+
def set_tsig_key_ring(self, key_list):
"""Set the tsig_key_ring , given a TSIG key string list representation. """
@@ -617,8 +701,10 @@ class XfroutServer:
def _start_xfr_query_listener(self):
'''Start a new thread to accept xfr query. '''
- self._unix_socket_server = UnixSockServer(self._listen_sock_file, XfroutSession,
- self._shutdown_event, self._config_data,
+ self._unix_socket_server = UnixSockServer(self._listen_sock_file,
+ XfroutSession,
+ self._shutdown_event,
+ self._config_data,
self._cc)
listener = threading.Thread(target=self._unix_socket_server.serve_forever)
listener.start()
@@ -726,6 +812,10 @@ if '__main__' == __name__:
logger.INFO(XFROUT_STOPPED_BY_KEYBOARD)
except SessionError as e:
logger.error(XFROUT_CC_SESSION_ERROR, str(e))
+ except ModuleCCSessionError as e:
+ logger.error(XFROUT_MODULECC_SESSION_ERROR, str(e))
+ except XfroutConfigError as e:
+ logger.error(XFROUT_CONFIG_ERROR, str(e))
except SessionTimeout as e:
logger.error(XFROUT_CC_SESSION_TIMEOUT_ERROR)
diff --git a/src/bin/xfrout/xfrout.spec.pre.in b/src/bin/xfrout/xfrout.spec.pre.in
index 8ecbb0b..0891a57 100644
--- a/src/bin/xfrout/xfrout.spec.pre.in
+++ b/src/bin/xfrout/xfrout.spec.pre.in
@@ -51,7 +51,7 @@
}
},
{
- "item_name": "query_acl",
+ "item_name": "transfer_acl",
"item_type": "list",
"item_optional": false,
"item_default": [{"action": "ACCEPT"}],
@@ -61,6 +61,45 @@
"item_type": "any",
"item_optional": true
}
+ },
+ {
+ "item_name": "zone_config",
+ "item_type": "list",
+ "item_optional": true,
+ "item_default": [],
+ "list_item_spec":
+ {
+ "item_name": "zone_config_element",
+ "item_type": "map",
+ "item_optional": true,
+ "item_default": { "origin": "" },
+ "map_item_spec": [
+ {
+ "item_name": "origin",
+ "item_type": "string",
+ "item_optional": false,
+ "item_default": ""
+ },
+ {
+ "item_name": "class",
+ "item_type": "string",
+ "item_optional": false,
+ "item_default": "IN"
+ },
+ {
+ "item_name": "transfer_acl",
+ "item_type": "list",
+ "item_optional": true,
+ "item_default": [{"action": "ACCEPT"}],
+ "list_item_spec":
+ {
+ "item_name": "acl_element",
+ "item_type": "any",
+ "item_optional": true
+ }
+ }
+ ]
+ }
}
],
"commands": [
diff --git a/src/bin/xfrout/xfrout_messages.mes b/src/bin/xfrout/xfrout_messages.mes
index 121b2ad..b2e432c 100644
--- a/src/bin/xfrout/xfrout_messages.mes
+++ b/src/bin/xfrout/xfrout_messages.mes
@@ -47,6 +47,17 @@ a valid TSIG key.
There was a problem reading from the command and control channel. The
most likely cause is that the msgq daemon is not running.
+% XFROUT_MODULECC_SESSION_ERROR error encountered by configuration/command module: %1
+There was a problem in the lower level module handling configuration and
+control commands. This could happen for various reasons, but the most likely
+cause is that the configuration database contains a syntax error and xfrout
+failed to start at initialization. A detailed error message from the module
+will also be displayed.
+
+% XFROUT_CONFIG_ERROR error found in configuration data: %1
+The xfrout process encountered an error when installing the configuration at
+startup time. Details of the error are included in the log message.
+
% XFROUT_CC_SESSION_TIMEOUT_ERROR timeout waiting for cc response
There was a problem reading a response from another module over the
command and control channel. The most likely cause is that the
diff --git a/src/bin/zonemgr/Makefile.am b/src/bin/zonemgr/Makefile.am
index 34e6622..aa427fd 100644
--- a/src/bin/zonemgr/Makefile.am
+++ b/src/bin/zonemgr/Makefile.am
@@ -6,10 +6,13 @@ pkglibexec_SCRIPTS = b10-zonemgr
b10_zonemgrdir = $(pkgdatadir)
b10_zonemgr_DATA = zonemgr.spec
-pyexec_DATA = zonemgr_messages.py
+
+nodist_pylogmessage_PYTHON = $(PYTHON_LOGMSGPKG_DIR)/work/zonemgr_messages.py
+pylogmessagedir = $(pyexecdir)/isc/log_messages/
CLEANFILES = b10-zonemgr zonemgr.pyc zonemgr.spec
-CLEANFILES += zonemgr_messages.py zonemgr_messages.pyc
+CLEANFILES += $(PYTHON_LOGMSGPKG_DIR)/work/zonemgr_messages.py
+CLEANFILES += $(PYTHON_LOGMSGPKG_DIR)/work/zonemgr_messages.pyc
man_MANS = b10-zonemgr.8
EXTRA_DIST = $(man_MANS) b10-zonemgr.xml zonemgr_messages.mes
@@ -22,13 +25,14 @@ b10-zonemgr.8: b10-zonemgr.xml
endif
# Build logging source file from message files
-zonemgr_messages.py: zonemgr_messages.mes
- $(top_builddir)/src/lib/log/compiler/message -p $(top_srcdir)/src/bin/zonemgr/zonemgr_messages.mes
+$(PYTHON_LOGMSGPKG_DIR)/work/zonemgr_messages.py : zonemgr_messages.mes
+ $(top_builddir)/src/lib/log/compiler/message \
+ -d $(PYTHON_LOGMSGPKG_DIR)/work -p $(srcdir)/zonemgr_messages.mes
zonemgr.spec: zonemgr.spec.pre
$(SED) -e "s|@@LOCALSTATEDIR@@|$(localstatedir)|" zonemgr.spec.pre >$@
-b10-zonemgr: zonemgr.py
+b10-zonemgr: zonemgr.py $(PYTHON_LOGMSGPKG_DIR)/work/zonemgr_messages.py
$(SED) -e "s|@@PYTHONPATH@@|@pyexecdir@|" \
-e "s|@@LOCALSTATEDIR@@|$(localstatedir)|" zonemgr.py >$@
chmod a+x $@
diff --git a/src/bin/zonemgr/tests/Makefile.am b/src/bin/zonemgr/tests/Makefile.am
index 97f9b5e..769d332 100644
--- a/src/bin/zonemgr/tests/Makefile.am
+++ b/src/bin/zonemgr/tests/Makefile.am
@@ -7,7 +7,7 @@ CLEANFILES = initdb.file
# required by loadable python modules.
LIBRARY_PATH_PLACEHOLDER =
if SET_ENV_LIBRARY_PATH
-LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$$$(ENV_LIBRARY_PATH)
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/util/io/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
endif
# test using command-line arguments, so use check-local target instead of TESTS
@@ -20,6 +20,6 @@ endif
for pytest in $(PYTESTS) ; do \
echo Running test: $$pytest ; \
$(LIBRARY_PATH_PLACEHOLDER) \
- env PYTHONPATH=$(abs_top_builddir)/src/bin/zonemgr:$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/xfr/.libs \
+ PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_builddir)/src/bin/zonemgr:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/xfr/.libs \
$(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
done
diff --git a/src/bin/zonemgr/tests/zonemgr_test.py b/src/bin/zonemgr/tests/zonemgr_test.py
index 496ce6b..80e41b3 100644
--- a/src/bin/zonemgr/tests/zonemgr_test.py
+++ b/src/bin/zonemgr/tests/zonemgr_test.py
@@ -152,6 +152,16 @@ class TestZonemgrRefresh(unittest.TestCase):
self.assertTrue((time1 + 3600 * (1 - self.zone_refresh._refresh_jitter)) <= zone_timeout)
self.assertTrue(zone_timeout <= time2 + 3600)
+ # No soa rdata
+ self.zone_refresh._zonemgr_refresh_info[ZONE_NAME_CLASS1_IN]["zone_soa_rdata"] = None
+ time3 = time.time()
+ self.zone_refresh._set_zone_retry_timer(ZONE_NAME_CLASS1_IN)
+ zone_timeout = self.zone_refresh._zonemgr_refresh_info[ZONE_NAME_CLASS1_IN]["next_refresh_time"]
+ time4 = time.time()
+ self.assertTrue((time3 + self.zone_refresh._lowerbound_retry * (1 - self.zone_refresh._refresh_jitter))
+ <= zone_timeout)
+ self.assertTrue(zone_timeout <= time4 + self.zone_refresh._lowerbound_retry)
+
def test_zone_not_exist(self):
self.assertFalse(self.zone_refresh._zone_not_exist(ZONE_NAME_CLASS1_IN))
self.assertTrue(self.zone_refresh._zone_not_exist(ZONE_NAME_CLASS1_CH))
@@ -304,8 +314,8 @@ class TestZonemgrRefresh(unittest.TestCase):
def get_zone_soa2(zone_name, db_file):
return None
sqlite3_ds.get_zone_soa = get_zone_soa2
- self.assertRaises(ZonemgrException, self.zone_refresh.zonemgr_add_zone, \
- ZONE_NAME_CLASS1_IN)
+ self.zone_refresh.zonemgr_add_zone(ZONE_NAME_CLASS2_IN)
+ self.assertTrue(self.zone_refresh._zonemgr_refresh_info[ZONE_NAME_CLASS2_IN]["zone_soa_rdata"] is None)
sqlite3_ds.get_zone_soa = old_get_zone_soa
def test_zone_handle_notify(self):
@@ -362,6 +372,15 @@ class TestZonemgrRefresh(unittest.TestCase):
self.assertRaises(ZonemgrException, self.zone_refresh.zone_refresh_fail, ZONE_NAME_CLASS3_CH)
self.assertRaises(ZonemgrException, self.zone_refresh.zone_refresh_fail, ZONE_NAME_CLASS3_IN)
+ old_get_zone_soa = sqlite3_ds.get_zone_soa
+ def get_zone_soa(zone_name, db_file):
+ return None
+ sqlite3_ds.get_zone_soa = get_zone_soa
+ self.zone_refresh.zone_refresh_fail(ZONE_NAME_CLASS1_IN)
+ self.assertEqual(self.zone_refresh._zonemgr_refresh_info[ZONE_NAME_CLASS1_IN]["zone_state"],
+ ZONE_EXPIRED)
+ sqlite3_ds.get_zone_soa = old_get_zone_soa
+
def test_find_need_do_refresh_zone(self):
time1 = time.time()
self.zone_refresh._zonemgr_refresh_info = {
@@ -440,6 +459,8 @@ class TestZonemgrRefresh(unittest.TestCase):
"class": "IN" } ]
}
self.zone_refresh.update_config_data(config_data)
+ self.assertTrue(("example.net.", "IN") in
+ self.zone_refresh._zonemgr_refresh_info)
# update all values
config_data = {
@@ -479,14 +500,16 @@ class TestZonemgrRefresh(unittest.TestCase):
"secondary_zones": [ { "name": "doesnotexist",
"class": "IN" } ]
}
- self.assertRaises(ZonemgrException,
- self.zone_refresh.update_config_data,
- config_data)
- self.assertEqual(60, self.zone_refresh._lowerbound_refresh)
- self.assertEqual(30, self.zone_refresh._lowerbound_retry)
- self.assertEqual(19800, self.zone_refresh._max_transfer_timeout)
- self.assertEqual(0.25, self.zone_refresh._refresh_jitter)
- self.assertEqual(0.35, self.zone_refresh._reload_jitter)
+ self.zone_refresh.update_config_data(config_data)
+ name_class = ("doesnotexist.", "IN")
+ self.assertTrue(self.zone_refresh._zonemgr_refresh_info[name_class]["zone_soa_rdata"]
+ is None)
+ # The other configs should be updated successfully
+ self.assertEqual(61, self.zone_refresh._lowerbound_refresh)
+ self.assertEqual(31, self.zone_refresh._lowerbound_retry)
+ self.assertEqual(19801, self.zone_refresh._max_transfer_timeout)
+ self.assertEqual(0.21, self.zone_refresh._refresh_jitter)
+ self.assertEqual(0.71, self.zone_refresh._reload_jitter)
# Make sure we accept 0 as a value
config_data = {
@@ -526,10 +549,11 @@ class TestZonemgrRefresh(unittest.TestCase):
self.zone_refresh._zonemgr_refresh_info)
# This one does not exist
config.set_zone_list_from_name_classes(["example.net", "CH"])
- self.assertRaises(ZonemgrException,
- self.zone_refresh.update_config_data, config)
- # So it should not affect the old ones
- self.assertTrue(("example.net.", "IN") in
+ self.zone_refresh.update_config_data(config)
+ self.assertFalse(("example.net.", "CH") in
+ self.zone_refresh._zonemgr_refresh_info)
+ # Simply skip loading soa for the zone, the other configs should be updated successfully
+ self.assertFalse(("example.net.", "IN") in
self.zone_refresh._zonemgr_refresh_info)
# Make sure it works even when we "accidentally" forget the final dot
config.set_zone_list_from_name_classes([("example.net", "IN")])
@@ -596,15 +620,18 @@ class TestZonemgr(unittest.TestCase):
config_data3 = {"refresh_jitter" : 0.7}
self.zonemgr.config_handler(config_data3)
self.assertEqual(0.5, self.zonemgr._config_data.get("refresh_jitter"))
- # The zone doesn't exist in database, it should be rejected
+ # The zone doesn't exist in database, simply skip loading soa for it and log a warning
self.zonemgr._zone_refresh = ZonemgrRefresh(None, "initdb.file", None,
config_data1)
config_data1["secondary_zones"] = [{"name": "nonexistent.example",
"class": "IN"}]
- self.assertNotEqual(self.zonemgr.config_handler(config_data1),
- {"result": [0]})
- # As it is rejected, the old value should be kept
- self.assertEqual(0.5, self.zonemgr._config_data.get("refresh_jitter"))
+ self.assertEqual(self.zonemgr.config_handler(config_data1),
+ {"result": [0]})
+ # other configs should be updated successfully
+ name_class = ("nonexistent.example.", "IN")
+ self.assertTrue(self.zonemgr._zone_refresh._zonemgr_refresh_info[name_class]["zone_soa_rdata"]
+ is None)
+ self.assertEqual(0.1, self.zonemgr._config_data.get("refresh_jitter"))
def test_get_db_file(self):
self.assertEqual("initdb.file", self.zonemgr.get_db_file())
diff --git a/src/bin/zonemgr/zonemgr.py.in b/src/bin/zonemgr/zonemgr.py.in
index 87a0092..5c8d9b5 100755
--- a/src/bin/zonemgr/zonemgr.py.in
+++ b/src/bin/zonemgr/zonemgr.py.in
@@ -37,7 +37,7 @@ from isc.datasrc import sqlite3_ds
from optparse import OptionParser, OptionValueError
from isc.config.ccsession import *
import isc.util.process
-from zonemgr_messages import *
+from isc.log_messages.zonemgr_messages import *
# Initialize logging for called modules.
isc.log.init("b10-zonemgr")
@@ -142,7 +142,10 @@ class ZonemgrRefresh:
"""Set zone next refresh time after zone refresh fail.
now + retry - retry_jitter <= next_refresh_time <= now + retry
"""
- zone_retry_time = float(self._get_zone_soa_rdata(zone_name_class).split(" ")[RETRY_OFFSET])
+ if (self._get_zone_soa_rdata(zone_name_class) is not None):
+ zone_retry_time = float(self._get_zone_soa_rdata(zone_name_class).split(" ")[RETRY_OFFSET])
+ else:
+ zone_retry_time = 0.0
zone_retry_time = max(self._lowerbound_retry, zone_retry_time)
self._set_zone_timer(zone_name_class, zone_retry_time, self._refresh_jitter * zone_retry_time)
@@ -174,7 +177,8 @@ class ZonemgrRefresh:
raise ZonemgrException("[b10-zonemgr] Zone (%s, %s) doesn't "
"belong to zonemgr" % zone_name_class)
# Is zone expired?
- if (self._zone_is_expired(zone_name_class)):
+ if ((self._get_zone_soa_rdata(zone_name_class) is None) or
+ self._zone_is_expired(zone_name_class)):
self._set_zone_state(zone_name_class, ZONE_EXPIRED)
else:
self._set_zone_state(zone_name_class, ZONE_OK)
@@ -200,17 +204,19 @@ class ZonemgrRefresh:
logger.debug(DBG_ZONEMGR_BASIC, ZONEMGR_LOAD_ZONE, zone_name_class[0], zone_name_class[1])
zone_info = {}
zone_soa = sqlite3_ds.get_zone_soa(str(zone_name_class[0]), self._db_file)
- if not zone_soa:
- logger.error(ZONEMGR_NO_SOA, zone_name_class[0], zone_name_class[1])
- raise ZonemgrException("[b10-zonemgr] zone (%s, %s) doesn't have soa." % zone_name_class)
- zone_info["zone_soa_rdata"] = zone_soa[7]
+ if zone_soa is None:
+ logger.warn(ZONEMGR_NO_SOA, zone_name_class[0], zone_name_class[1])
+ zone_info["zone_soa_rdata"] = None
+ zone_reload_time = 0.0
+ else:
+ zone_info["zone_soa_rdata"] = zone_soa[7]
+ zone_reload_time = float(zone_soa[7].split(" ")[RETRY_OFFSET])
zone_info["zone_state"] = ZONE_OK
zone_info["last_refresh_time"] = self._get_current_time()
self._zonemgr_refresh_info[zone_name_class] = zone_info
# Imposes some random jitters to avoid many zones need to do refresh at the same time.
- zone_reload_jitter = float(zone_soa[7].split(" ")[RETRY_OFFSET])
- zone_reload_jitter = max(self._lowerbound_retry, zone_reload_jitter)
- self._set_zone_timer(zone_name_class, zone_reload_jitter, self._reload_jitter * zone_reload_jitter)
+ zone_reload_time = max(self._lowerbound_retry, zone_reload_time)
+ self._set_zone_timer(zone_name_class, zone_reload_time, self._reload_jitter * zone_reload_time)
def _zone_is_expired(self, zone_name_class):
"""Judge whether a zone is expired or not."""
@@ -420,12 +426,6 @@ class ZonemgrRefresh:
def update_config_data(self, new_config):
""" update ZonemgrRefresh config """
- # TODO: we probably want to store all this info in a nice
- # class, so that we don't have to backup and restore every
- # single value.
- # TODO2: We also don't use get_default_value yet
- backup = self._zonemgr_refresh_info.copy()
-
# Get a new value, but only if it is defined (commonly used below)
# We don't use "value or default", because if value would be
# 0, we would take default
@@ -435,26 +435,21 @@ class ZonemgrRefresh:
else:
return default
- # store the values so we can restore them if there is a problem
- lowerbound_refresh_backup = self._lowerbound_refresh
self._lowerbound_refresh = val_or_default(
new_config.get('lowerbound_refresh'), self._lowerbound_refresh)
- lowerbound_retry_backup = self._lowerbound_retry
self._lowerbound_retry = val_or_default(
new_config.get('lowerbound_retry'), self._lowerbound_retry)
- max_transfer_timeout_backup = self._max_transfer_timeout
self._max_transfer_timeout = val_or_default(
new_config.get('max_transfer_timeout'), self._max_transfer_timeout)
- refresh_jitter_backup = self._refresh_jitter
self._refresh_jitter = val_or_default(
new_config.get('refresh_jitter'), self._refresh_jitter)
- reload_jitter_backup = self._reload_jitter
self._reload_jitter = val_or_default(
new_config.get('reload_jitter'), self._reload_jitter)
+
try:
required = {}
secondary_zones = new_config.get('secondary_zones')
@@ -469,6 +464,7 @@ class ZonemgrRefresh:
required[name_class] = True
# Add it only if it isn't there already
if not name_class in self._zonemgr_refresh_info:
+ # If we are not able to find it in database, log a warning
self.zonemgr_add_zone(name_class)
# Drop the zones that are no longer there
# Do it in two phases, python doesn't like deleting while iterating
@@ -478,14 +474,7 @@ class ZonemgrRefresh:
to_drop.append(old_zone)
for drop in to_drop:
del self._zonemgr_refresh_info[drop]
- # If we are not able to find it in database, restore the original
except:
- self._zonemgr_refresh_info = backup
- self._lowerbound_refresh = lowerbound_refresh_backup
- self._lowerbound_retry = lowerbound_retry_backup
- self._max_transfer_timeout = max_transfer_timeout_backup
- self._refresh_jitter = refresh_jitter_backup
- self._reload_jitter = reload_jitter_backup
raise
class Zonemgr:
diff --git a/src/cppcheck-suppress.lst b/src/cppcheck-suppress.lst
index a4fea30..8a4c7c1 100644
--- a/src/cppcheck-suppress.lst
+++ b/src/cppcheck-suppress.lst
@@ -3,7 +3,7 @@
debug
missingInclude
// This is a template, and should be excluded from the check
-unreadVariable:src/lib/dns/rdata/template.cc:60
+unreadVariable:src/lib/dns/rdata/template.cc:61
// Intentional self assignment tests. Suppress warning about them.
selfAssignment:src/lib/dns/tests/name_unittest.cc:293
selfAssignment:src/lib/dns/tests/rdata_unittest.cc:228
diff --git a/src/lib/Makefile.am b/src/lib/Makefile.am
index 5adf150..04eee45 100644
--- a/src/lib/Makefile.am
+++ b/src/lib/Makefile.am
@@ -1,3 +1,3 @@
-SUBDIRS = exceptions util log cryptolink dns cc config acl python xfr \
- bench asiolink asiodns nsas cache resolve testutils datasrc \
- server_common
+SUBDIRS = exceptions util log cryptolink dns cc config acl xfr bench \
+ asiolink asiodns nsas cache resolve testutils datasrc \
+ server_common python
diff --git a/src/lib/asiolink/io_address.cc b/src/lib/asiolink/io_address.cc
index 7f7a6fc..7e2f5d4 100644
--- a/src/lib/asiolink/io_address.cc
+++ b/src/lib/asiolink/io_address.cc
@@ -63,5 +63,10 @@ IOAddress::getFamily() const {
}
}
+const asio::ip::address&
+IOAddress::getAddress() const {
+ return asio_address_;
+}
+
} // namespace asiolink
} // namespace isc
diff --git a/src/lib/asiolink/io_address.h b/src/lib/asiolink/io_address.h
index 655b727..1a42da7 100644
--- a/src/lib/asiolink/io_address.h
+++ b/src/lib/asiolink/io_address.h
@@ -74,6 +74,19 @@ public:
/// \return A string representation of the address.
std::string toText() const;
+ /// \brief Convert the address to a C-style null-terminated string.
+ ///
+ /// \return A string representation of the address.
+ operator const char*() const { return toText().c_str(); }
+
+ /// \brief Returns const reference to the underlying address object.
+ ///
+ /// This is useful when access to the interface offered by
+ /// asio::ip::address_v4 and asio::ip::address_v6 is beneficial.
+ ///
+ /// \return A const reference to asio::ip::address object
+ const asio::ip::address& getAddress() const;
+
/// \brief Returns the address family
///
/// \return AF_INET for IPv4 or AF_INET6 for IPv6.
diff --git a/src/lib/asiolink/tests/io_address_unittest.cc b/src/lib/asiolink/tests/io_address_unittest.cc
index 18b181e..b2a0dcc 100644
--- a/src/lib/asiolink/tests/io_address_unittest.cc
+++ b/src/lib/asiolink/tests/io_address_unittest.cc
@@ -18,11 +18,14 @@
#include <asiolink/io_error.h>
#include <asiolink/io_address.h>
+#include <cstring>
+
using namespace isc::asiolink;
TEST(IOAddressTest, fromText) {
IOAddress io_address_v4("192.0.2.1");
EXPECT_EQ("192.0.2.1", io_address_v4.toText());
+ EXPECT_EQ(0, strcmp("192.0.2.1", (const char *)io_address_v4));
IOAddress io_address_v6("2001:db8::1234");
EXPECT_EQ("2001:db8::1234", io_address_v6.toText());
diff --git a/src/lib/asiolink/tests/io_endpoint_unittest.cc b/src/lib/asiolink/tests/io_endpoint_unittest.cc
index f0279d1..c7283ec 100644
--- a/src/lib/asiolink/tests/io_endpoint_unittest.cc
+++ b/src/lib/asiolink/tests/io_endpoint_unittest.cc
@@ -219,7 +219,7 @@ sockAddrMatch(const struct sockaddr& actual_sa,
res->ai_addr->sa_len = actual_sa.sa_len;
#endif
EXPECT_EQ(0, memcmp(res->ai_addr, &actual_sa, res->ai_addrlen));
- free(res);
+ freeaddrinfo(res);
}
TEST(IOEndpointTest, getSockAddr) {
diff --git a/src/lib/cache/cache_messages.mes b/src/lib/cache/cache_messages.mes
index 2a68cc2..19102ae 100644
--- a/src/lib/cache/cache_messages.mes
+++ b/src/lib/cache/cache_messages.mes
@@ -124,14 +124,14 @@ the message will not be cached.
Debug message. The requested data was found in the RRset cache. However, it is
expired, so the cache removed it and is going to pretend nothing was found.
-% CACHE_RRSET_INIT initializing RRset cache for %2 RRsets of class %1
+% CACHE_RRSET_INIT initializing RRset cache for %1 RRsets of class %2
Debug message. The RRset cache to hold at most this many RRsets for the given
class is being created.
% CACHE_RRSET_LOOKUP looking up %1/%2/%3 in RRset cache
Debug message. The resolver is trying to look up data in the RRset cache.
-% CACHE_RRSET_NOT_FOUND no RRset found for %1/%2/%3
+% CACHE_RRSET_NOT_FOUND no RRset found for %1/%2/%3 in cache
Debug message which can follow CACHE_RRSET_LOOKUP. This means the data is not
in the cache.
diff --git a/src/lib/datasrc/Makefile.am b/src/lib/datasrc/Makefile.am
index db67781..5e193d2 100644
--- a/src/lib/datasrc/Makefile.am
+++ b/src/lib/datasrc/Makefile.am
@@ -9,7 +9,7 @@ AM_CXXFLAGS = $(B10_CXXFLAGS)
CLEANFILES = *.gcno *.gcda datasrc_messages.h datasrc_messages.cc
-lib_LTLIBRARIES = libdatasrc.la
+lib_LTLIBRARIES = libdatasrc.la sqlite3_ds.la memory_ds.la
libdatasrc_la_SOURCES = data_source.h data_source.cc
libdatasrc_la_SOURCES += static_datasrc.h static_datasrc.cc
libdatasrc_la_SOURCES += sqlite3_datasrc.h sqlite3_datasrc.cc
@@ -17,19 +17,26 @@ libdatasrc_la_SOURCES += query.h query.cc
libdatasrc_la_SOURCES += cache.h cache.cc
libdatasrc_la_SOURCES += rbtree.h
libdatasrc_la_SOURCES += zonetable.h zonetable.cc
-libdatasrc_la_SOURCES += memory_datasrc.h memory_datasrc.cc
libdatasrc_la_SOURCES += zone.h
libdatasrc_la_SOURCES += result.h
libdatasrc_la_SOURCES += logger.h logger.cc
-libdatasrc_la_SOURCES += client.h
+libdatasrc_la_SOURCES += client.h iterator.h
libdatasrc_la_SOURCES += database.h database.cc
-libdatasrc_la_SOURCES += sqlite3_accessor.h sqlite3_accessor.cc
+#libdatasrc_la_SOURCES += sqlite3_accessor.h sqlite3_accessor.cc
+libdatasrc_la_SOURCES += factory.h factory.cc
nodist_libdatasrc_la_SOURCES = datasrc_messages.h datasrc_messages.cc
+sqlite3_ds_la_SOURCES = sqlite3_accessor.h sqlite3_accessor.cc
+sqlite3_ds_la_LDFLAGS = -module
+
+memory_ds_la_SOURCES = memory_datasrc.h memory_datasrc.cc
+memory_ds_la_LDFLAGS = -module
+
libdatasrc_la_LIBADD = $(top_builddir)/src/lib/exceptions/libexceptions.la
libdatasrc_la_LIBADD += $(top_builddir)/src/lib/dns/libdns++.la
libdatasrc_la_LIBADD += $(top_builddir)/src/lib/log/liblog.la
libdatasrc_la_LIBADD += $(top_builddir)/src/lib/cc/libcc.la
+libdatasrc_la_LIBADD += $(SQLITE_LIBS)
BUILT_SOURCES = datasrc_messages.h datasrc_messages.cc
datasrc_messages.h datasrc_messages.cc: Makefile datasrc_messages.mes
diff --git a/src/lib/datasrc/client.h b/src/lib/datasrc/client.h
index 9fe6519..40b7a3f 100644
--- a/src/lib/datasrc/client.h
+++ b/src/lib/datasrc/client.h
@@ -16,12 +16,60 @@
#define __DATA_SOURCE_CLIENT_H 1
#include <boost/noncopyable.hpp>
+#include <boost/shared_ptr.hpp>
+
+#include <exceptions/exceptions.h>
#include <datasrc/zone.h>
+/// \file
+/// Datasource clients
+///
+/// The data source client API is specified in client.h, and provides the
+/// functionality to query and modify data in the data sources. There are
+/// multiple datasource implementations, and by subclassing DataSourceClient or
+/// DatabaseClient, more can be added.
+///
+/// All datasources are implemented as loadable modules, with a name of the
+/// form "<type>_ds.so". This has been chosen intentionally, to minimize
+/// confusion and potential mistakes.
+///
+/// In order to use a datasource client backend, the class
+/// DataSourceClientContainer is provided in factory.h; this will load the
+/// library, set up the instance, and clean everything up once it is destroyed.
+///
+/// Access to the actual instance is provided with the getInstance() method
+/// in DataSourceClientContainer
+///
+/// \note Depending on actual usage, we might consider making the container
+/// a transparent abstraction layer, so it can be used as a DataSourceClient
+/// directly. This has some other implications though so for now the only access
+/// provided is through getInstance().
+///
+/// For datasource backends, we use a dynamically loaded library system (with
+/// dlopen()). This library must contain the following things;
+/// - A subclass of DataSourceClient or DatabaseClient (which itself is a
+/// subclass of DataSourceClient)
+/// - A creator function for an instance of that subclass, of the form:
+/// \code
+/// extern "C" DataSourceClient* createInstance(isc::data::ConstElementPtr cfg);
+/// \endcode
+/// - A destructor for said instance, of the form:
+/// \code
+/// extern "C" void destroyInstance(isc::data::DataSourceClient* instance);
+/// \endcode
+///
+/// See the documentation for the \link DataSourceClient \endlink class for
+/// more information on implementing subclasses of it.
+///
+
namespace isc {
namespace datasrc {
+// The iterator.h is not included on purpose, most application won't need it
+class ZoneIterator;
+typedef boost::shared_ptr<ZoneIterator> ZoneIteratorPtr;
+
/// \brief The base class of data source clients.
///
/// This is an abstract base class that defines the common interface for
@@ -32,6 +80,9 @@ namespace datasrc {
/// operations to other classes; in general methods of this class act as
/// factories of these other classes.
///
+/// See \link datasrc/client.h datasrc/client.h \endlink for more information
+/// on adding datasource implementations.
+///
/// The following derived classes are currently (expected to be) provided:
/// - \c InMemoryClient: A client of a conceptual data source that stores
/// all necessary data in memory for faster lookups
@@ -73,8 +124,8 @@ namespace datasrc {
/// disruption with a naive copy it's prohibited explicitly. For the expected
/// usage of the client classes the restriction should be acceptable.
///
-/// \todo This class is not complete. It needs more factory methods, for
-/// accessing the whole zone, updating it, loading it, etc.
+/// \todo This class is still not complete. It will need more factory methods,
+/// e.g. for (re)loading a zone.
class DataSourceClient : boost::noncopyable {
public:
/// \brief A helper structure to represent the search result of
@@ -143,6 +194,95 @@ public:
/// \param name A domain name for which the search is performed.
/// \return A \c FindResult object enclosing the search result (see above).
virtual FindResult findZone(const isc::dns::Name& name) const = 0;
+
+ /// \brief Returns an iterator to the given zone
+ ///
+ /// This allows for traversing the whole zone. The returned object can
+ /// provide the RRsets one by one.
+ ///
+ /// This throws DataSourceError when the zone does not exist in the
+ /// datasource.
+ ///
+ /// The default implementation throws isc::NotImplemented. This allows
+ /// for easy and fast deployment of minimal custom data sources, where
+ /// the user/implementer doesn't have to care about anything else but
+ /// the actual queries. Also, in some cases, it isn't possible to traverse
+ /// the zone from a logical point of view (e.g. dynamically generated zone
+ /// data).
+ ///
+ /// It is not fixed if a concrete implementation of this method can throw
+ /// anything else.
+ ///
+ /// \param name The name of the zone apex to be traversed. It doesn't do
+ /// a nearest match as findZone does.
+ /// \return Pointer to the iterator.
+ virtual ZoneIteratorPtr getIterator(const isc::dns::Name& name) const {
+ // This is here to both document the parameter in doxygen (therefore it
+ // needs a name) and avoid unused parameter warning.
+ static_cast<void>(name);
+
+ isc_throw(isc::NotImplemented,
+ "Data source doesn't support iteration");
+ }
+
+ /// Return an updater to make updates to a specific zone.
+ ///
+ /// The RR class of the zone is the one that the client is expected to
+ /// handle (see the detailed description of this class).
+ ///
+ /// If the specified zone is not found via the client, a NULL pointer
+ /// will be returned; in other words a completely new zone cannot be
+ /// created using an updater. It must be created beforehand (even if
+ /// it's an empty placeholder) in a way specific to the underlying data
+ /// source.
+ ///
+ /// Conceptually, the updater will trigger a separate transaction for
+ /// subsequent updates to the zone within the context of the updater
+ /// (the actual implementation of the "transaction" may vary for the
+ /// specific underlying data source). Until \c commit() is performed
+ /// on the updater, the intermediate updates won't affect the results
+ /// of other methods (and the result of the object's methods created
+ /// by other factory methods). Likewise, if the updater is destructed
+ /// without performing \c commit(), the intermediate updates will be
+ /// effectively canceled and will never affect other methods.
+ ///
+ /// If the underlying data source allows concurrent updates, this method
+ /// can be called multiple times while the previously returned updater(s)
+ /// are still active. In this case each updater triggers a different
+ /// "transaction". Normally it would be for different zones for such a
+ /// case as handling multiple incoming AXFR streams concurrently, but
+ /// this interface does not even prohibit an attempt of getting more than
+ /// one updater for the same zone, as long as the underlying data source
+ /// allows such an operation (and any conflict resolution is left to the
+ /// specific derived class implementation).
+ ///
+ /// If \c replace is true, any existing RRs of the zone will be
+ /// deleted on successful completion of updates (after \c commit() on
+ /// the updater); if it's false, the existing RRs will be
+ /// intact unless explicitly deleted by \c deleteRRset() on the updater.
+ ///
+ /// A data source can be "read only" or can prohibit partial updates.
+ /// In such cases this method will result in an \c isc::NotImplemented
+ /// exception (unconditionally, or when \c replace is false).
+ ///
+ /// \note To avoid throwing the exception accidentally with a lazy
+ /// implementation, we still keep this method pure virtual without
+ /// an implementation. All derived classes must explicitly define this
+ /// method, even if it simply throws the NotImplemented exception.
+ ///
+ /// \exception NotImplemented The underlying data source does not support
+ /// updates.
+ /// \exception DataSourceError Internal error in the underlying data
+ /// source.
+ /// \exception std::bad_alloc Resource allocation failure.
+ ///
+ /// \param name The zone name to be updated
+ /// \param replace Whether to delete existing RRs before making updates
+ ///
+ /// \return A pointer to the updater; it will be NULL if the specified
+ /// zone isn't found.
+ virtual ZoneUpdaterPtr getUpdater(const isc::dns::Name& name,
+ bool replace) const = 0;
};
}
}
diff --git a/src/lib/datasrc/data_source.h b/src/lib/datasrc/data_source.h
index ff695da..a7a15a9 100644
--- a/src/lib/datasrc/data_source.h
+++ b/src/lib/datasrc/data_source.h
@@ -184,9 +184,9 @@ public:
void setClass(isc::dns::RRClass& c) { rrclass = c; }
void setClass(const isc::dns::RRClass& c) { rrclass = c; }
- Result init() { return (NOT_IMPLEMENTED); }
- Result init(isc::data::ConstElementPtr config);
- Result close() { return (NOT_IMPLEMENTED); }
+ virtual Result init() { return (NOT_IMPLEMENTED); }
+ virtual Result init(isc::data::ConstElementPtr config);
+ virtual Result close() { return (NOT_IMPLEMENTED); }
virtual Result findRRset(const isc::dns::Name& qname,
const isc::dns::RRClass& qclass,
@@ -351,7 +351,7 @@ public:
/// \brief Returns the best enclosing zone name found for the given
// name and RR class so far.
- ///
+ ///
/// \return A pointer to the zone apex \c Name, NULL if none found yet.
///
/// This method never throws an exception.
@@ -413,6 +413,6 @@ private:
#endif
-// Local Variables:
+// Local Variables:
// mode: c++
-// End:
+// End:
diff --git a/src/lib/datasrc/database.cc b/src/lib/datasrc/database.cc
index d2202cd..e476297 100644
--- a/src/lib/datasrc/database.cc
+++ b/src/lib/datasrc/database.cc
@@ -12,13 +12,18 @@
// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
// PERFORMANCE OF THIS SOFTWARE.
+#include <string>
#include <vector>
#include <datasrc/database.h>
+#include <datasrc/data_source.h>
+#include <datasrc/iterator.h>
#include <exceptions/exceptions.h>
#include <dns/name.h>
+#include <dns/rrclass.h>
#include <dns/rrttl.h>
+#include <dns/rrset.h>
#include <dns/rdata.h>
#include <dns/rdataclass.h>
@@ -27,16 +32,20 @@
#include <boost/foreach.hpp>
-using isc::dns::Name;
+using namespace isc::dns;
+using namespace std;
+using boost::shared_ptr;
+using namespace isc::dns::rdata;
namespace isc {
namespace datasrc {
-DatabaseClient::DatabaseClient(boost::shared_ptr<DatabaseAccessor>
- database) :
- database_(database)
+DatabaseClient::DatabaseClient(RRClass rrclass,
+ boost::shared_ptr<DatabaseAccessor>
+ accessor) :
+ rrclass_(rrclass), accessor_(accessor)
{
- if (database_.get() == NULL) {
+ if (!accessor_) {
isc_throw(isc::InvalidParameter,
"No database provided to DatabaseClient");
}
@@ -44,31 +53,34 @@ DatabaseClient::DatabaseClient(boost::shared_ptr<DatabaseAccessor>
DataSourceClient::FindResult
DatabaseClient::findZone(const Name& name) const {
- std::pair<bool, int> zone(database_->getZone(name));
+ std::pair<bool, int> zone(accessor_->getZone(name.toText()));
// Try exact first
if (zone.first) {
return (FindResult(result::SUCCESS,
- ZoneFinderPtr(new Finder(database_,
- zone.second))));
+ ZoneFinderPtr(new Finder(accessor_,
+ zone.second, name))));
}
- // Than super domains
+ // Then super domains
// Start from 1, as 0 is covered above
for (size_t i(1); i < name.getLabelCount(); ++i) {
- zone = database_->getZone(name.split(i));
+ isc::dns::Name superdomain(name.split(i));
+ zone = accessor_->getZone(superdomain.toText());
if (zone.first) {
return (FindResult(result::PARTIALMATCH,
- ZoneFinderPtr(new Finder(database_,
- zone.second))));
+ ZoneFinderPtr(new Finder(accessor_,
+ zone.second,
+ superdomain))));
}
}
// No, really nothing
return (FindResult(result::NOTFOUND, ZoneFinderPtr()));
}
-DatabaseClient::Finder::Finder(boost::shared_ptr<DatabaseAccessor>
- database, int zone_id) :
- database_(database),
- zone_id_(zone_id)
+DatabaseClient::Finder::Finder(boost::shared_ptr<DatabaseAccessor> accessor,
+ int zone_id, const isc::dns::Name& origin) :
+ accessor_(accessor),
+ zone_id_(zone_id),
+ origin_(origin)
{ }
namespace {
@@ -105,7 +117,7 @@ void addOrCreate(isc::dns::RRsetPtr& rrset,
if (ttl < rrset->getTTL()) {
rrset->setTTL(ttl);
}
- logger.info(DATASRC_DATABASE_FIND_TTL_MISMATCH)
+ logger.warn(DATASRC_DATABASE_FIND_TTL_MISMATCH)
.arg(db.getDBName()).arg(name).arg(cls)
.arg(type).arg(rrset->getTTL());
}
@@ -162,37 +174,56 @@ private:
};
}
-
-ZoneFinder::FindResult
-DatabaseClient::Finder::find(const isc::dns::Name& name,
- const isc::dns::RRType& type,
- isc::dns::RRsetList*,
- const FindOptions)
+DatabaseClient::Finder::FoundRRsets
+DatabaseClient::Finder::getRRsets(const string& name, const WantedTypes& types,
+ bool check_ns, const string* construct_name)
{
- // This variable is used to determine the difference between
- // NXDOMAIN and NXRRSET
- bool records_found = false;
- isc::dns::RRsetPtr result_rrset;
- ZoneFinder::Result result_status = SUCCESS;
RRsigStore sig_store;
- logger.debug(DBG_TRACE_DETAILED, DATASRC_DATABASE_FIND_RECORDS)
- .arg(database_->getDBName()).arg(name).arg(type);
+ bool records_found = false;
+ std::map<RRType, RRsetPtr> result;
- try {
- database_->searchForRecords(zone_id_, name.toText());
+ // Request the context
+ DatabaseAccessor::IteratorContextPtr
+ context(accessor_->getRecords(name, zone_id_));
+ // It must not return NULL, that's a bug of the implementation
+ if (!context) {
+ isc_throw(isc::Unexpected, "Iterator context null at " + name);
+ }
- std::string columns[DatabaseAccessor::COLUMN_COUNT];
- while (database_->getNextRecord(columns,
- DatabaseAccessor::COLUMN_COUNT)) {
- if (!records_found) {
- records_found = true;
+ std::string columns[DatabaseAccessor::COLUMN_COUNT];
+ if (construct_name == NULL) {
+ construct_name = &name;
+ }
+
+ const Name construct_name_object(*construct_name);
+
+ bool seen_cname(false);
+ bool seen_ds(false);
+ bool seen_other(false);
+ bool seen_ns(false);
+
+ while (context->getNext(columns)) {
+ // The domain is not empty
+ records_found = true;
+
+ try {
+ const RRType cur_type(columns[DatabaseAccessor::TYPE_COLUMN]);
+
+ if (cur_type == RRType::RRSIG()) {
+ // If we get signatures before we get the actual data, we
+ // can't know which ones to keep and which to drop...
+ // So we keep a separate store of any signature that may be
+ // relevant and add them to the final RRset when we are
+ // done.
+ // A possible optimization here is to not store them for
+ // types we are certain we don't need
+ sig_store.addSig(rdata::createRdata(cur_type, getClass(),
+ columns[DatabaseAccessor::RDATA_COLUMN]));
}
- try {
- const isc::dns::RRType cur_type(columns[DatabaseAccessor::
- TYPE_COLUMN]);
- const isc::dns::RRTTL cur_ttl(columns[DatabaseAccessor::
- TTL_COLUMN]);
+ if (types.find(cur_type) != types.end()) {
+ // This type is requested, so put it into result
+ const RRTTL cur_ttl(columns[DatabaseAccessor::TTL_COLUMN]);
// Ths sigtype column was an optimization for finding the
// relevant RRSIG RRs for a lookup. Currently this column is
// not used in this revised datasource implementation. We
@@ -204,102 +235,450 @@ DatabaseClient::Finder::find(const isc::dns::Name& name,
// contains an rrtype that is different from the actual value
// of the 'type covered' field in the RRSIG Rdata).
//cur_sigtype(columns[SIGTYPE_COLUMN]);
+ addOrCreate(result[cur_type], construct_name_object,
+ getClass(), cur_type, cur_ttl,
+ columns[DatabaseAccessor::RDATA_COLUMN],
+ *accessor_);
+ }
- if (cur_type == type) {
- if (result_rrset &&
- result_rrset->getType() == isc::dns::RRType::CNAME()) {
- isc_throw(DataSourceError, "CNAME found but it is not "
- "the only record for " + name.toText());
- }
- addOrCreate(result_rrset, name, getClass(), cur_type,
- cur_ttl, columns[DatabaseAccessor::
- RDATA_COLUMN],
- *database_);
- } else if (cur_type == isc::dns::RRType::CNAME()) {
- // There should be no other data, so result_rrset should
- // be empty.
- if (result_rrset) {
- isc_throw(DataSourceError, "CNAME found but it is not "
- "the only record for " + name.toText());
+ if (cur_type == RRType::CNAME()) {
+ seen_cname = true;
+ } else if (cur_type == RRType::NS()) {
+ seen_ns = true;
+ } else if (cur_type == RRType::DS()) {
+ seen_ds = true;
+ } else if (cur_type != RRType::RRSIG() &&
+ cur_type != RRType::NSEC3() &&
+ cur_type != RRType::NSEC()) {
+ // NSEC and RRSIG can coexist with anything, otherwise
+ // we've seen something that can't live together with potential
+ // CNAME or NS
+ //
+ // NSEC3 lives in separate namespace from everything, therefore
+ // we just ignore it here for these checks as well.
+ seen_other = true;
+ }
+ } catch (const InvalidRRType&) {
+ isc_throw(DataSourceError, "Invalid RRType in database for " <<
+ name << ": " << columns[DatabaseAccessor::
+ TYPE_COLUMN]);
+ } catch (const InvalidRRTTL&) {
+ isc_throw(DataSourceError, "Invalid TTL in database for " <<
+ name << ": " << columns[DatabaseAccessor::
+ TTL_COLUMN]);
+ } catch (const rdata::InvalidRdataText&) {
+ isc_throw(DataSourceError, "Invalid rdata in database for " <<
+ name << ": " << columns[DatabaseAccessor::
+ RDATA_COLUMN]);
+ }
+ }
+ if (seen_cname && (seen_other || seen_ns || seen_ds)) {
+ isc_throw(DataSourceError, "CNAME shares domain " << name <<
+ " with something else");
+ }
+ if (check_ns && seen_ns && seen_other) {
+ isc_throw(DataSourceError, "NS shares domain " << name <<
+ " with something else");
+ }
+ // Add signatures to all found RRsets
+ for (std::map<RRType, RRsetPtr>::iterator i(result.begin());
+ i != result.end(); ++ i) {
+ sig_store.appendSignatures(i->second);
+ }
+
+ return (FoundRRsets(records_found, result));
+}
+
+bool
+DatabaseClient::Finder::hasSubdomains(const std::string& name) {
+ // Request the context
+ DatabaseAccessor::IteratorContextPtr
+ context(accessor_->getRecords(name, zone_id_, true));
+ // It must not return NULL, that's a bug of the implementation
+ if (!context) {
+ isc_throw(isc::Unexpected, "Iterator context null at " + name);
+ }
+
+ std::string columns[DatabaseAccessor::COLUMN_COUNT];
+ return (context->getNext(columns));
+}
+
+// Some manipulation with RRType sets
+namespace {
+
+// Bunch of functions to construct specific sets of RRTypes we will
+// ask from it.
+typedef std::set<RRType> WantedTypes;
+
+const WantedTypes&
+NSEC_TYPES() {
+ static bool initialized(false);
+ static WantedTypes result;
+
+ if (!initialized) {
+ result.insert(RRType::NSEC());
+ initialized = true;
+ }
+ return (result);
+}
+
+const WantedTypes&
+DELEGATION_TYPES() {
+ static bool initialized(false);
+ static WantedTypes result;
+
+ if (!initialized) {
+ result.insert(RRType::DNAME());
+ result.insert(RRType::NS());
+ initialized = true;
+ }
+ return (result);
+}
+
+const WantedTypes&
+FINAL_TYPES() {
+ static bool initialized(false);
+ static WantedTypes result;
+
+ if (!initialized) {
+ result.insert(RRType::CNAME());
+ result.insert(RRType::NS());
+ result.insert(RRType::NSEC());
+ initialized = true;
+ }
+ return (result);
+}
+
+}
+
+RRsetPtr
+DatabaseClient::Finder::findNSECCover(const Name& name) {
+ try {
+ // Which one should contain the NSEC record?
+ const Name coverName(findPreviousName(name));
+ // Get the record and copy it out
+ const FoundRRsets found = getRRsets(coverName.toText(), NSEC_TYPES(),
+ coverName != getOrigin());
+ const FoundIterator
+ nci(found.second.find(RRType::NSEC()));
+ if (nci != found.second.end()) {
+ return (nci->second);
+ } else {
+ // The previous doesn't contain NSEC.
+ // Badly signed zone or a bug?
+
+ // FIXME: Currently, if the zone is not signed, we could get
+ // here. In that case we can't really throw, but for now, we can't
+ // recognize it. So we don't throw at all, enable it once
+ // we have a is_signed flag or something.
+#if 0
+ isc_throw(DataSourceError, "No NSEC in " +
+ coverName.toText() + ", but it was "
+ "returned as previous - "
+ "accessor error? Badly signed zone?");
+#endif
+ }
+ }
+ catch (const isc::NotImplemented&) {
+ // Well, they want DNSSEC, but there is no available.
+ // So we don't provide anything.
+ LOG_INFO(logger, DATASRC_DATABASE_COVER_NSEC_UNSUPPORTED).
+ arg(accessor_->getDBName()).arg(name);
+ }
+ // We didn't find it, return nothing
+ return (RRsetPtr());
+}
+
+ZoneFinder::FindResult
+DatabaseClient::Finder::find(const isc::dns::Name& name,
+ const isc::dns::RRType& type,
+ isc::dns::RRsetList*,
+ const FindOptions options)
+{
+ // This variable is used to determine the difference between
+ // NXDOMAIN and NXRRSET
+ bool records_found = false;
+ bool glue_ok((options & FIND_GLUE_OK) != 0);
+ const bool dnssec_data((options & FIND_DNSSEC) != 0);
+ bool get_cover(false);
+ isc::dns::RRsetPtr result_rrset;
+ ZoneFinder::Result result_status = SUCCESS;
+ FoundRRsets found;
+ logger.debug(DBG_TRACE_DETAILED, DATASRC_DATABASE_FIND_RECORDS)
+ .arg(accessor_->getDBName()).arg(name).arg(type);
+ // In case we are in GLUE_OK mode and start matching wildcards,
+ // we can't do it under NS, so we store it here to check
+ isc::dns::RRsetPtr first_ns;
+
+ // First, do we have any kind of delegation (NS/DNAME) here?
+ const Name origin(getOrigin());
+ const size_t origin_label_count(origin.getLabelCount());
+ // Number of labels in the last known non-empty domain
+ size_t last_known(origin_label_count);
+ const size_t current_label_count(name.getLabelCount());
+ // This is how many labels we remove to get origin
+ size_t remove_labels(current_label_count - origin_label_count);
+
+ // Now go trough all superdomains from origin down
+ for (int i(remove_labels); i > 0; --i) {
+ Name superdomain(name.split(i));
+ // Look if there's NS or DNAME (but ignore the NS in origin)
+ found = getRRsets(superdomain.toText(), DELEGATION_TYPES(),
+ i != remove_labels);
+ if (found.first) {
+ // It contains some RRs, so it exists.
+ last_known = superdomain.getLabelCount();
+
+ const FoundIterator nsi(found.second.find(RRType::NS()));
+ const FoundIterator dni(found.second.find(RRType::DNAME()));
+ // In case we are in GLUE_OK mode, we want to store the
+ // highest encountered NS (but not apex)
+ if (glue_ok && !first_ns && i != remove_labels &&
+ nsi != found.second.end()) {
+ first_ns = nsi->second;
+ } else if (!glue_ok && i != remove_labels &&
+ nsi != found.second.end()) {
+ // Do a NS delegation, but ignore NS in glue_ok mode. Ignore
+ // delegation in apex
+ LOG_DEBUG(logger, DBG_TRACE_DETAILED,
+ DATASRC_DATABASE_FOUND_DELEGATION).
+ arg(accessor_->getDBName()).arg(superdomain);
+ result_rrset = nsi->second;
+ result_status = DELEGATION;
+ // No need to go lower, found
+ break;
+ } else if (dni != found.second.end()) {
+ // Very similar with DNAME
+ LOG_DEBUG(logger, DBG_TRACE_DETAILED,
+ DATASRC_DATABASE_FOUND_DNAME).
+ arg(accessor_->getDBName()).arg(superdomain);
+ result_rrset = dni->second;
+ result_status = DNAME;
+ if (result_rrset->getRdataCount() != 1) {
+ isc_throw(DataSourceError, "DNAME at " << superdomain <<
+ " has " << result_rrset->getRdataCount() <<
+ " rdata, 1 expected");
+ }
+ break;
+ }
+ }
+ }
+
+ if (!result_rrset) { // Only if we didn't find a redirect already
+ // Try getting the final result and extract it
+ // It is special if there's a CNAME or NS, DNAME is ignored here
+ // And we don't consider the NS in origin
+
+ WantedTypes final_types(FINAL_TYPES());
+ final_types.insert(type);
+ found = getRRsets(name.toText(), final_types, name != origin);
+ records_found = found.first;
+
+ // NS records, CNAME record and Wanted Type records
+ const FoundIterator nsi(found.second.find(RRType::NS()));
+ const FoundIterator cni(found.second.find(RRType::CNAME()));
+ const FoundIterator wti(found.second.find(type));
+ if (name != origin && !glue_ok && nsi != found.second.end()) {
+ // There's a delegation at the exact node.
+ LOG_DEBUG(logger, DBG_TRACE_DETAILED,
+ DATASRC_DATABASE_FOUND_DELEGATION_EXACT).
+ arg(accessor_->getDBName()).arg(name);
+ result_status = DELEGATION;
+ result_rrset = nsi->second;
+ } else if (type != isc::dns::RRType::CNAME() &&
+ cni != found.second.end()) {
+ // A CNAME here
+ result_status = CNAME;
+ result_rrset = cni->second;
+ if (result_rrset->getRdataCount() != 1) {
+ isc_throw(DataSourceError, "CNAME with " <<
+ result_rrset->getRdataCount() <<
+ " rdata at " << name << ", expected 1");
+ }
+ } else if (wti != found.second.end()) {
+ // Just get the answer
+ result_rrset = wti->second;
+ } else if (!records_found) {
+ // Nothing lives here.
+ // But check if something lives below this
+ // domain and if so, pretend something is here as well.
+ if (hasSubdomains(name.toText())) {
+ LOG_DEBUG(logger, DBG_TRACE_DETAILED,
+ DATASRC_DATABASE_FOUND_EMPTY_NONTERMINAL).
+ arg(accessor_->getDBName()).arg(name);
+ records_found = true;
+ get_cover = dnssec_data;
+ } else {
+ // It's not empty non-terminal. So check for wildcards.
+ // We remove labels one by one and look for the wildcard there.
+ // Go up to first non-empty domain.
+
+ remove_labels = current_label_count - last_known;
+ for (size_t i(1); i <= remove_labels; ++ i) {
+ // Construct the name with *
+ const Name superdomain(name.split(i));
+ const string wildcard("*." + superdomain.toText());
+ const string construct_name(name.toText());
+ // TODO What do we do about DNAME here?
+ // The types are the same as with original query
+ found = getRRsets(wildcard, final_types, true,
+ &construct_name);
+ if (found.first) {
+ if (first_ns) {
+ // In case we are under NS, we don't
+ // wildcard-match, but return delegation
+ result_rrset = first_ns;
+ result_status = DELEGATION;
+ records_found = true;
+ // We pretend to switch to non-glue_ok mode
+ glue_ok = false;
+ LOG_DEBUG(logger, DBG_TRACE_DETAILED,
+ DATASRC_DATABASE_WILDCARD_CANCEL_NS).
+ arg(accessor_->getDBName()).arg(wildcard).
+ arg(first_ns->getName());
+ } else if (!hasSubdomains(name.split(i - 1).toText()))
+ {
+ // Nothing we added as part of the * can exist
+ // directly, as we go up only to first existing
+ // domain, but it could be empty non-terminal. In
+ // that case, we need to cancel the match.
+ records_found = true;
+ const FoundIterator
+ cni(found.second.find(RRType::CNAME()));
+ const FoundIterator
+ nsi(found.second.find(RRType::NS()));
+ const FoundIterator
+ nci(found.second.find(RRType::NSEC()));
+ const FoundIterator wti(found.second.find(type));
+ if (cni != found.second.end() &&
+ type != RRType::CNAME()) {
+ result_rrset = cni->second;
+ result_status = CNAME;
+ } else if (nsi != found.second.end()) {
+ result_rrset = nsi->second;
+ result_status = DELEGATION;
+ } else if (wti != found.second.end()) {
+ result_rrset = wti->second;
+ result_status = WILDCARD;
+ } else {
+ // NXRRSET case in the wildcard
+ result_status = WILDCARD_NXRRSET;
+ if (dnssec_data &&
+ nci != found.second.end()) {
+ // User wants a proof the wildcard doesn't
+ // contain it
+ //
+ // However, we need to get the RRset in the
+ // name of the wildcard, not the constructed
+ // one, so we walk it again
+ found = getRRsets(wildcard, NSEC_TYPES(),
+ true);
+ result_rrset =
+ found.second.find(RRType::NSEC())->
+ second;
+ }
+ }
+
+ LOG_DEBUG(logger, DBG_TRACE_DETAILED,
+ DATASRC_DATABASE_WILDCARD).
+ arg(accessor_->getDBName()).arg(wildcard).
+ arg(name);
+ } else {
+ LOG_DEBUG(logger, DBG_TRACE_DETAILED,
+ DATASRC_DATABASE_WILDCARD_CANCEL_SUB).
+ arg(accessor_->getDBName()).arg(wildcard).
+ arg(name).arg(superdomain);
+ }
+ break;
+ } else if (hasSubdomains(wildcard)) {
+ // Empty non-terminal asterisk
+ records_found = true;
+ LOG_DEBUG(logger, DBG_TRACE_DETAILED,
+ DATASRC_DATABASE_WILDCARD_EMPTY).
+ arg(accessor_->getDBName()).arg(wildcard).
+ arg(name);
+ if (dnssec_data) {
+ result_rrset = findNSECCover(Name(wildcard));
+ if (result_rrset) {
+ result_status = WILDCARD_NXRRSET;
+ }
+ }
+ break;
}
- addOrCreate(result_rrset, name, getClass(), cur_type,
- cur_ttl, columns[DatabaseAccessor::
- RDATA_COLUMN],
- *database_);
- result_status = CNAME;
- } else if (cur_type == isc::dns::RRType::RRSIG()) {
- // If we get signatures before we get the actual data, we
- // can't know which ones to keep and which to drop...
- // So we keep a separate store of any signature that may be
- // relevant and add them to the final RRset when we are
- // done.
- // A possible optimization here is to not store them for
- // types we are certain we don't need
- sig_store.addSig(isc::dns::rdata::createRdata(cur_type,
- getClass(),
- columns[DatabaseAccessor::
- RDATA_COLUMN]));
}
- } catch (const isc::dns::InvalidRRType& irt) {
- isc_throw(DataSourceError, "Invalid RRType in database for " <<
- name << ": " << columns[DatabaseAccessor::
- TYPE_COLUMN]);
- } catch (const isc::dns::InvalidRRTTL& irttl) {
- isc_throw(DataSourceError, "Invalid TTL in database for " <<
- name << ": " << columns[DatabaseAccessor::
- TTL_COLUMN]);
- } catch (const isc::dns::rdata::InvalidRdataText& ird) {
- isc_throw(DataSourceError, "Invalid rdata in database for " <<
- name << ": " << columns[DatabaseAccessor::
- RDATA_COLUMN]);
+ // This is the NXDOMAIN case (nothing found anywhere). If
+ // they want DNSSEC data, try getting the NSEC record
+ if (dnssec_data && !records_found) {
+ get_cover = true;
+ }
+ }
+ } else if (dnssec_data) {
+ // This is the "usual" NXRRSET case
+ // So in case they want DNSSEC, provide the NSEC
+ // (which should be available already here)
+ result_status = NXRRSET;
+ const FoundIterator nci(found.second.find(RRType::NSEC()));
+ if (nci != found.second.end()) {
+ result_rrset = nci->second;
}
}
- } catch (const DataSourceError& dse) {
- logger.error(DATASRC_DATABASE_FIND_ERROR)
- .arg(database_->getDBName()).arg(dse.what());
- // call cleanup and rethrow
- database_->resetSearch();
- throw;
- } catch (const isc::Exception& isce) {
- logger.error(DATASRC_DATABASE_FIND_UNCAUGHT_ISC_ERROR)
- .arg(database_->getDBName()).arg(isce.what());
- // cleanup, change it to a DataSourceError and rethrow
- database_->resetSearch();
- isc_throw(DataSourceError, isce.what());
- } catch (const std::exception& ex) {
- logger.error(DATASRC_DATABASE_FIND_UNCAUGHT_ERROR)
- .arg(database_->getDBName()).arg(ex.what());
- database_->resetSearch();
- throw;
}
if (!result_rrset) {
- if (records_found) {
- logger.debug(DBG_TRACE_DETAILED,
- DATASRC_DATABASE_FOUND_NXRRSET)
- .arg(database_->getDBName()).arg(name)
- .arg(getClass()).arg(type);
- result_status = NXRRSET;
- } else {
- logger.debug(DBG_TRACE_DETAILED,
- DATASRC_DATABASE_FOUND_NXDOMAIN)
- .arg(database_->getDBName()).arg(name)
- .arg(getClass()).arg(type);
- result_status = NXDOMAIN;
+ if (result_status == SUCCESS) {
+ // Should we look for NSEC covering the name?
+ if (get_cover) {
+ result_rrset = findNSECCover(name);
+ if (result_rrset) {
+ result_status = NXDOMAIN;
+ }
+ }
+ // Something is not here and we didn't decide yet what
+ if (records_found) {
+ logger.debug(DBG_TRACE_DETAILED,
+ DATASRC_DATABASE_FOUND_NXRRSET)
+ .arg(accessor_->getDBName()).arg(name)
+ .arg(getClass()).arg(type);
+ result_status = NXRRSET;
+ } else {
+ logger.debug(DBG_TRACE_DETAILED,
+ DATASRC_DATABASE_FOUND_NXDOMAIN)
+ .arg(accessor_->getDBName()).arg(name)
+ .arg(getClass()).arg(type);
+ result_status = NXDOMAIN;
+ }
}
} else {
- sig_store.appendSignatures(result_rrset);
logger.debug(DBG_TRACE_DETAILED,
DATASRC_DATABASE_FOUND_RRSET)
- .arg(database_->getDBName()).arg(*result_rrset);
+ .arg(accessor_->getDBName()).arg(*result_rrset);
}
return (FindResult(result_status, result_rrset));
}
Name
+DatabaseClient::Finder::findPreviousName(const Name& name) const {
+ const string str(accessor_->findPreviousName(zone_id_,
+ name.reverse().toText()));
+ try {
+ return (Name(str));
+ }
+ /*
+ * To avoid having the same code many times, we just catch all the
+ * exceptions and handle them in a common code below
+ */
+ catch (const isc::dns::EmptyLabel&) {}
+ catch (const isc::dns::TooLongLabel&) {}
+ catch (const isc::dns::BadLabelType&) {}
+ catch (const isc::dns::BadEscape&) {}
+ catch (const isc::dns::TooLongName&) {}
+ catch (const isc::dns::IncompleteName&) {}
+ isc_throw(DataSourceError, "Bad name " + str + " from findPreviousName");
+}
+
+Name
DatabaseClient::Finder::getOrigin() const {
- // TODO Implement
- return (Name("."));
+ return (origin_);
}
isc::dns::RRClass
@@ -308,5 +687,274 @@ DatabaseClient::Finder::getClass() const {
return isc::dns::RRClass::IN();
}
+namespace {
+
+/*
+ * Besides converting all data from textual representation, this needs to group
+ * together rdata of the same RRset. To do this, we hold one row of data ahead
+ * of iteration. When we get a request to provide data, we create it from this
+ * data and load a new one. If it is to be put to the same rrset, we add it.
+ * Otherwise we just return what we have and keep the row as the one ahead
+ * for next time.
+ */
+class DatabaseIterator : public ZoneIterator {
+public:
+ DatabaseIterator(const DatabaseAccessor::IteratorContextPtr& context,
+ const RRClass& rrclass) :
+ context_(context),
+ class_(rrclass),
+ ready_(true)
+ {
+ // Prepare data for the next time
+ getData();
+ }
+
+ virtual isc::dns::ConstRRsetPtr getNextRRset() {
+ if (!ready_) {
+ isc_throw(isc::Unexpected, "Iterating past the zone end");
+ }
+ if (!data_ready_) {
+ // At the end of zone
+ ready_ = false;
+ LOG_DEBUG(logger, DBG_TRACE_DETAILED,
+ DATASRC_DATABASE_ITERATE_END);
+ return (ConstRRsetPtr());
+ }
+ string name_str(name_), rtype_str(rtype_), ttl(ttl_);
+ Name name(name_str);
+ RRType rtype(rtype_str);
+ RRsetPtr rrset(new RRset(name, class_, rtype, RRTTL(ttl)));
+ while (data_ready_ && name_ == name_str && rtype_str == rtype_) {
+ if (ttl_ != ttl) {
+ if (ttl < ttl_) {
+ ttl_ = ttl;
+ rrset->setTTL(RRTTL(ttl));
+ }
+ LOG_WARN(logger, DATASRC_DATABASE_ITERATE_TTL_MISMATCH).
+ arg(name_).arg(class_).arg(rtype_).arg(rrset->getTTL());
+ }
+ rrset->addRdata(rdata::createRdata(rtype, class_, rdata_));
+ getData();
+ }
+ LOG_DEBUG(logger, DBG_TRACE_DETAILED, DATASRC_DATABASE_ITERATE_NEXT).
+ arg(rrset->getName()).arg(rrset->getType());
+ return (rrset);
+ }
+private:
+ // Load next row of data
+ void getData() {
+ string data[DatabaseAccessor::COLUMN_COUNT];
+ data_ready_ = context_->getNext(data);
+ name_ = data[DatabaseAccessor::NAME_COLUMN];
+ rtype_ = data[DatabaseAccessor::TYPE_COLUMN];
+ ttl_ = data[DatabaseAccessor::TTL_COLUMN];
+ rdata_ = data[DatabaseAccessor::RDATA_COLUMN];
+ }
+
+ // The context
+ const DatabaseAccessor::IteratorContextPtr context_;
+ // Class of the zone
+ RRClass class_;
+ // Status
+ bool ready_, data_ready_;
+ // Data of the next row
+ string name_, rtype_, rdata_, ttl_;
+};
+
+}
+
+ZoneIteratorPtr
+DatabaseClient::getIterator(const isc::dns::Name& name) const {
+ // Get the zone
+ std::pair<bool, int> zone(accessor_->getZone(name.toText()));
+ if (!zone.first) {
+ // No such zone, can't continue
+ isc_throw(DataSourceError, "Zone " + name.toText() +
+ " can not be iterated, because it doesn't exist "
+ "in this data source");
+ }
+ // Request the context
+ DatabaseAccessor::IteratorContextPtr
+ context(accessor_->getAllRecords(zone.second));
+ // It must not return NULL, that's a bug of the implementation
+ if (context == DatabaseAccessor::IteratorContextPtr()) {
+ isc_throw(isc::Unexpected, "Iterator context null at " +
+ name.toText());
+ }
+ // Create the iterator and return it
+ // TODO: Once #1062 is merged with this, we need to get the
+ // actual zone class from the connection, as the DatabaseClient
+ // doesn't know it and the iterator needs it (so it wouldn't query
+ // it each time)
+ LOG_DEBUG(logger, DBG_TRACE_DETAILED, DATASRC_DATABASE_ITERATE).
+ arg(name);
+ return (ZoneIteratorPtr(new DatabaseIterator(context, RRClass::IN())));
+}
+
+//
+// Zone updater using some database system as the underlying data source.
+//
+class DatabaseUpdater : public ZoneUpdater {
+public:
+ DatabaseUpdater(shared_ptr<DatabaseAccessor> accessor, int zone_id,
+ const Name& zone_name, const RRClass& zone_class) :
+ committed_(false), accessor_(accessor), zone_id_(zone_id),
+ db_name_(accessor->getDBName()), zone_name_(zone_name.toText()),
+ zone_class_(zone_class),
+ finder_(new DatabaseClient::Finder(accessor_, zone_id_, zone_name))
+ {
+ logger.debug(DBG_TRACE_DATA, DATASRC_DATABASE_UPDATER_CREATED)
+ .arg(zone_name_).arg(zone_class_).arg(db_name_);
+ }
+
+ virtual ~DatabaseUpdater() {
+ if (!committed_) {
+ try {
+ accessor_->rollbackUpdateZone();
+ logger.info(DATASRC_DATABASE_UPDATER_ROLLBACK)
+ .arg(zone_name_).arg(zone_class_).arg(db_name_);
+ } catch (const DataSourceError& e) {
+ // We generally expect that rollback always succeeds, and
+ // it should in fact succeed in a way we execute it. But
+ // as the public API allows rollbackUpdateZone() to fail and
+ // throw, we should expect it. Obviously we cannot re-throw
+ // it. The best we can do is to log it as a critical error.
+ logger.error(DATASRC_DATABASE_UPDATER_ROLLBACKFAIL)
+ .arg(zone_name_).arg(zone_class_).arg(db_name_)
+ .arg(e.what());
+ }
+ }
+
+ logger.debug(DBG_TRACE_DATA, DATASRC_DATABASE_UPDATER_DESTROYED)
+ .arg(zone_name_).arg(zone_class_).arg(db_name_);
+ }
+
+ virtual ZoneFinder& getFinder() { return (*finder_); }
+
+ virtual void addRRset(const RRset& rrset);
+ virtual void deleteRRset(const RRset& rrset);
+ virtual void commit();
+
+private:
+ bool committed_;
+ shared_ptr<DatabaseAccessor> accessor_;
+ const int zone_id_;
+ const string db_name_;
+ const string zone_name_;
+ const RRClass zone_class_;
+ boost::scoped_ptr<DatabaseClient::Finder> finder_;
+};
+
+void
+DatabaseUpdater::addRRset(const RRset& rrset) {
+ if (committed_) {
+ isc_throw(DataSourceError, "Add attempt after commit to zone: "
+ << zone_name_ << "/" << zone_class_);
+ }
+ if (rrset.getClass() != zone_class_) {
+ isc_throw(DataSourceError, "An RRset of a different class is being "
+ << "added to " << zone_name_ << "/" << zone_class_ << ": "
+ << rrset.toText());
+ }
+ if (rrset.getRRsig()) {
+ isc_throw(DataSourceError, "An RRset with RRSIG is being added to "
+ << zone_name_ << "/" << zone_class_ << ": "
+ << rrset.toText());
+ }
+
+ RdataIteratorPtr it = rrset.getRdataIterator();
+ if (it->isLast()) {
+ isc_throw(DataSourceError, "An empty RRset is being added for "
+ << rrset.getName() << "/" << zone_class_ << "/"
+ << rrset.getType());
+ }
+
+ string columns[DatabaseAccessor::ADD_COLUMN_COUNT]; // initialized with ""
+ columns[DatabaseAccessor::ADD_NAME] = rrset.getName().toText();
+ columns[DatabaseAccessor::ADD_REV_NAME] =
+ rrset.getName().reverse().toText();
+ columns[DatabaseAccessor::ADD_TTL] = rrset.getTTL().toText();
+ columns[DatabaseAccessor::ADD_TYPE] = rrset.getType().toText();
+ for (; !it->isLast(); it->next()) {
+ if (rrset.getType() == RRType::RRSIG()) {
+ // XXX: the current interface (based on the current sqlite3
+ // data source schema) requires a separate "sigtype" column,
+ // even though it won't be used in a newer implementation.
+ // We should eventually clean up the schema design and simplify
+ // the interface, but until then we have to conform to the schema.
+ const generic::RRSIG& rrsig_rdata =
+ dynamic_cast<const generic::RRSIG&>(it->getCurrent());
+ columns[DatabaseAccessor::ADD_SIGTYPE] =
+ rrsig_rdata.typeCovered().toText();
+ }
+ columns[DatabaseAccessor::ADD_RDATA] = it->getCurrent().toText();
+ accessor_->addRecordToZone(columns);
+ }
+}
+
+void
+DatabaseUpdater::deleteRRset(const RRset& rrset) {
+ if (committed_) {
+ isc_throw(DataSourceError, "Delete attempt after commit on zone: "
+ << zone_name_ << "/" << zone_class_);
+ }
+ if (rrset.getClass() != zone_class_) {
+ isc_throw(DataSourceError, "An RRset of a different class is being "
+ << "deleted from " << zone_name_ << "/" << zone_class_
+ << ": " << rrset.toText());
+ }
+ if (rrset.getRRsig()) {
+ isc_throw(DataSourceError, "An RRset with RRSIG is being deleted from "
+ << zone_name_ << "/" << zone_class_ << ": "
+ << rrset.toText());
+ }
+
+ RdataIteratorPtr it = rrset.getRdataIterator();
+ if (it->isLast()) {
+ isc_throw(DataSourceError, "An empty RRset is being deleted for "
+ << rrset.getName() << "/" << zone_class_ << "/"
+ << rrset.getType());
+ }
+
+ string params[DatabaseAccessor::DEL_PARAM_COUNT]; // initialized with ""
+ params[DatabaseAccessor::DEL_NAME] = rrset.getName().toText();
+ params[DatabaseAccessor::DEL_TYPE] = rrset.getType().toText();
+ for (; !it->isLast(); it->next()) {
+ params[DatabaseAccessor::DEL_RDATA] = it->getCurrent().toText();
+ accessor_->deleteRecordInZone(params);
+ }
+}
+
+void
+DatabaseUpdater::commit() {
+ if (committed_) {
+ isc_throw(DataSourceError, "Duplicate commit attempt for "
+ << zone_name_ << "/" << zone_class_ << " on "
+ << db_name_);
+ }
+ accessor_->commitUpdateZone();
+ committed_ = true; // make sure the destructor won't trigger rollback
+
+ // We release the accessor immediately after commit is completed so that
+ // we don't hold the possible internal resource any longer.
+ accessor_.reset();
+
+ logger.debug(DBG_TRACE_DATA, DATASRC_DATABASE_UPDATER_COMMIT)
+ .arg(zone_name_).arg(zone_class_).arg(db_name_);
+}
+
+// The updater factory
+ZoneUpdaterPtr
+DatabaseClient::getUpdater(const isc::dns::Name& name, bool replace) const {
+ shared_ptr<DatabaseAccessor> update_accessor(accessor_->clone());
+ const std::pair<bool, int> zone(update_accessor->startUpdateZone(
+ name.toText(), replace));
+ if (!zone.first) {
+ return (ZoneUpdaterPtr());
+ }
+
+ return (ZoneUpdaterPtr(new DatabaseUpdater(update_accessor, zone.second,
+ name, rrclass_)));
+}
}
}
diff --git a/src/lib/datasrc/database.h b/src/lib/datasrc/database.h
index 5253e60..8295779 100644
--- a/src/lib/datasrc/database.h
+++ b/src/lib/datasrc/database.h
@@ -15,8 +15,22 @@
#ifndef __DATABASE_DATASRC_H
#define __DATABASE_DATASRC_H
+#include <string>
+
+#include <boost/scoped_ptr.hpp>
+
+#include <dns/rrclass.h>
+#include <dns/rrclass.h>
+#include <dns/rrset.h>
+
#include <datasrc/client.h>
+#include <dns/name.h>
+#include <exceptions/exceptions.h>
+
+#include <map>
+#include <set>
+
namespace isc {
namespace datasrc {
@@ -46,12 +60,67 @@ namespace datasrc {
class DatabaseAccessor : boost::noncopyable {
public:
/**
+ * Definitions of the fields as they are required to be filled in
+ * by IteratorContext::getNext()
+ *
+ * When implementing getNext(), the columns array should
+ * be filled with the values as described in this enumeration,
+ * in this order, i.e. TYPE_COLUMN should be the first element
+ * (index 0) of the array, TTL_COLUMN should be the second element
+ * (index 1), etc.
+ */
+ enum RecordColumns {
+ TYPE_COLUMN = 0, ///< The RRType of the record (A/NS/TXT etc.)
+ TTL_COLUMN = 1, ///< The TTL of the record (a numeric string)
+ SIGTYPE_COLUMN = 2, ///< For RRSIG records, this contains the RRTYPE
+ ///< the RRSIG covers. In the current implementation,
+ ///< this field is ignored.
+ RDATA_COLUMN = 3, ///< Full text representation of the record's RDATA
+ NAME_COLUMN = 4, ///< The domain name of this RR
+ COLUMN_COUNT = 5 ///< The total number of columns, MUST be value of
+ ///< the largest other element in this enum plus 1.
+ };
+
+ /**
+ * Definitions of the fields to be passed to addRecordToZone().
+ *
+ * Each derived implementation of addRecordToZone() should expect
+ * the "columns" vector to be filled with the values as described in this
+ * enumeration, in this order.
+ */
+ enum AddRecordColumns {
+ ADD_NAME = 0, ///< The owner name of the record (a domain name)
+ ADD_REV_NAME = 1, ///< Reversed name of NAME (used for DNSSEC)
+ ADD_TTL = 2, ///< The TTL of the record (in numeric form)
+ ADD_TYPE = 3, ///< The RRType of the record (A/NS/TXT etc.)
+ ADD_SIGTYPE = 4, ///< For RRSIG records, this contains the RRTYPE
+ ///< the RRSIG covers.
+ ADD_RDATA = 5, ///< Full text representation of the record's RDATA
+ ADD_COLUMN_COUNT = 6 ///< Number of columns
+ };
+
+ /**
+ * Definitions of the fields to be passed to deleteRecordInZone().
+ *
+ * Each derived implementation of deleteRecordInZone() should expect
+ * the "params" vector to be filled with the values as described in this
+ * enumeration, in this order.
+ */
+ enum DeleteRecordParams {
+ DEL_NAME = 0, ///< The owner name of the record (a domain name)
+ DEL_TYPE = 1, ///< The RRType of the record (A/NS/TXT etc.)
+ DEL_RDATA = 2, ///< Full text representation of the record's RDATA
+ DEL_PARAM_COUNT = 3 ///< Number of parameters
+ };
+
+ /**
* \brief Destructor
*
* It is empty, but needs a virtual one, since we will use the derived
* classes in polymorphic way.
*/
virtual ~DatabaseAccessor() { }
+
/**
* \brief Retrieve a zone identifier
*
@@ -63,7 +132,8 @@ public:
* It is not specified if and what implementation of this method may throw,
* so code should expect anything.
*
- * \param name The name of the zone's apex to be looked up.
+ * \param name The (fully qualified) domain name of the zone's apex to be
+ * looked up.
* \return The first part of the result indicates if a matching zone
* was found. In case it was, the second part is internal zone ID.
* This one will be passed to methods finding data in the zone.
@@ -71,83 +141,325 @@ public:
* be returned - the ID is only passed back to the database as
* an opaque handle.
*/
- virtual std::pair<bool, int> getZone(const isc::dns::Name& name) const = 0;
+ virtual std::pair<bool, int> getZone(const std::string& name) const = 0;
/**
- * \brief Starts a new search for records of the given name in the given zone
- *
- * The data searched by this call can be retrieved with subsequent calls to
- * getNextRecord().
+ * \brief This holds the internal context of ZoneIterator for databases
*
- * \exception DataSourceError if there is a problem connecting to the
- * backend database
+ * While the ZoneIterator implementation from DatabaseClient does all the
+ * translation from strings to DNS classes and validation, this class
+ * holds the pointer to where the database is at reading the data.
*
- * \param zone_id The zone to search in, as returned by getZone()
- * \param name The name of the records to find
+ * It can either hold shared pointer to the connection which created it
+ * and have some kind of statement inside (in case single database
+ * connection can handle multiple concurrent SQL statements) or it can
+ * create a new connection (or, if it is more convenient, the connection
+ * itself can inherit both from DatabaseConnection and IteratorContext
+ * and just clone itself).
*/
- virtual void searchForRecords(int zone_id, const std::string& name) = 0;
+ class IteratorContext : public boost::noncopyable {
+ public:
+ /**
+ * \brief Destructor
+ *
+ * Virtual destructor, so any descendant class is destroyed correctly.
+ */
+ virtual ~IteratorContext() { }
+
+ /**
+ * \brief Function to provide next resource record
+ *
+ * This function should provide data about the next resource record
+ * from the data that is searched. The data is not converted yet.
+ *
+ * Depending on how the iterator was constructed, there is a difference
+ * in behaviour; for a 'full zone iterator', created with
+ * getAllRecords(), all COLUMN_COUNT elements of the array are
+ * overwritten.
+ * For a 'name iterator', created with getRecords(), the column
+ * NAME_COLUMN is untouched, since what would be added here is by
+ * definition already known to the caller (it already passes it as
+ * an argument to getRecords()).
+ *
+ * Once this function returns false, any subsequent call to it should
+ * result in false. The implementation of a derived class must ensure
+ * it doesn't cause any disruption due to that such as a crash or
+ * exception.
+ *
+ * \note The order of RRs is not strictly set, but the RRs for single
+ * RRset must not be interleaved with any other RRs (eg. RRsets must be
+ * "together").
+ *
+ * \param columns The data will be returned through here. The order
+ * is specified by the RecordColumns enum, and the size must be
+ * COLUMN_COUNT
+ * \todo Do we consider databases where it is stored in binary blob
+ * format?
+ * \throw DataSourceError if there's database-related error. If the
+ * exception (or any other in case of derived class) is thrown,
+ * the iterator can't be safely used any more.
+ * \return true if a record was found, and the columns array was
+ * updated. false if there was no more data, in which case
+ * the columns array is untouched.
+ */
+ virtual bool getNext(std::string (&columns)[COLUMN_COUNT]) = 0;
+ };
+
+ typedef boost::shared_ptr<IteratorContext> IteratorContextPtr;
/**
- * \brief Retrieves the next record from the search started with searchForRecords()
+ * \brief Creates an iterator context for a specific name.
*
- * Returns a boolean specifying whether or not there was more data to read.
- * In the case of a database error, a DatasourceError is thrown.
+ * Returns an IteratorContextPtr that contains all records of the
+ * given name from the given zone.
*
- * The columns passed is an array of std::strings consisting of
- * DatabaseConnection::COLUMN_COUNT elements, the elements of which
- * are defined in DatabaseConnection::RecordColumns, in their basic
- * string representation.
+ * The implementation of the iterator that is returned may leave the
+ * NAME_COLUMN column of the array passed to getNext() untouched, as that
+ * data is already known (it is the same as the name argument here)
*
- * If you are implementing a derived database connection class, you
- * should have this method check the column_count value, and fill the
- * array with strings conforming to their description in RecordColumn.
+ * \exception any Since any implementation can be used, the caller should
+ * expect any exception to be thrown.
*
- * \exception DatasourceError if there was an error reading from the database
- *
- * \param columns The elements of this array will be filled with the data
- * for one record as defined by RecordColumns
- * If there was no data, the array is untouched.
- * \return true if there was a next record, false if there was not
+ * \param name The name to search for. This should be a FQDN.
+ * \param id The ID of the zone, returned from getZone().
+ * \param subdomains If set to true, match subdomains of name instead
+ * of name itself. It is used to find empty domains and match
+ * wildcards.
+ * \return Newly created iterator context. Must not be NULL.
*/
- virtual bool getNextRecord(std::string columns[], size_t column_count) = 0;
+ virtual IteratorContextPtr getRecords(const std::string& name,
+ int id,
+ bool subdomains = false) const = 0;
/**
- * \brief Resets the current search initiated with searchForRecords()
+ * \brief Creates an iterator context for the whole zone.
*
- * This method will be called when the called of searchForRecords() and
- * getNextRecord() finds bad data, and aborts the current search.
- * It should clean up whatever handlers searchForRecords() created, and
- * any other state modified or needed by getNextRecord()
+ * Returns an IteratorContextPtr that contains all records of the
+ * zone with the given zone id.
*
- * Of course, the implementation of getNextRecord may also use it when
- * it is done with a search. If it does, the implementation of this
- * method should make sure it can handle being called multiple times.
+ * Each call to getNext() on the returned iterator should copy all
+ * column fields of the array that is passed, as defined in the
+ * RecordColumns enum.
*
- * The implementation for this method should make sure it never throws.
- */
- virtual void resetSearch() = 0;
-
- /**
- * Definitions of the fields as they are required to be filled in
- * by getNextRecord()
+ * \exception any Since any implementation can be used, the caller should
+ * expect any exception to be thrown.
*
- * When implementing getNextRecord(), the columns array should
- * be filled with the values as described in this enumeration,
- * in this order, i.e. TYPE_COLUMN should be the first element
- * (index 0) of the array, TTL_COLUMN should be the second element
- * (index 1), etc.
+ * \param id The ID of the zone, returned from getZone().
+ * \return Newly created iterator context. Must not be NULL.
*/
- enum RecordColumns {
- TYPE_COLUMN = 0, ///< The RRType of the record (A/NS/TXT etc.)
- TTL_COLUMN = 1, ///< The TTL of the record (a
- SIGTYPE_COLUMN = 2, ///< For RRSIG records, this contains the RRTYPE
- ///< the RRSIG covers. In the current implementation,
- ///< this field is ignored.
- RDATA_COLUMN = 3 ///< Full text representation of the record's RDATA
- };
+ virtual IteratorContextPtr getAllRecords(int id) const = 0;
+
+ /// Start a transaction for updating a zone.
+ ///
+ /// Each derived class version of this method starts a database
+ /// transaction to make updates to the given name of zone (whose class was
+ /// specified at the construction of the class).
+ ///
+ /// If \c replace is true, any existing records of the zone will be
+ /// deleted on successful completion of updates (after
+ /// \c commitUpdateZone()); if it's false, the existing records will be
+ /// intact unless explicitly deleted by \c deleteRecordInZone().
+ ///
+ /// A single \c DatabaseAccessor instance can perform at most one update
+ /// transaction; a duplicate call to this method before
+ /// \c commitUpdateZone() or \c rollbackUpdateZone() will result in
+ /// a \c DataSourceError exception. If multiple update attempts need
+ /// to be performed concurrently (and if the underlying database allows
+ /// such operation), separate \c DatabaseAccessor instance must be
+ /// created.
+ ///
+ /// \note The underlying database may not allow concurrent updates to
+ /// the same database instance even if different "connections" (or
+ /// something similar specific to the database implementation) are used
+ /// for different sets of updates. For example, it doesn't seem to be
+ /// possible for SQLite3 unless different databases are used. MySQL
+ /// allows concurrent updates to different tables of the same database,
+ /// but a specific operation may block others. As such, this interface
+ /// doesn't require derived classes to allow concurrent updates with
+ /// multiple \c DatabaseAccessor instances; however, the implementation
+ /// is encouraged to do the best for making it more likely to succeed
+ /// as long as the underlying database system allows concurrent updates.
+ ///
+ /// This method returns a pair of \c bool and \c int. Its first element
+ /// indicates whether the given name of zone is found. If it's false,
+ /// the transaction isn't considered to be started; a subsequent call to
+ /// this method with an existing zone name should succeed. Likewise,
+ /// if a call to this method results in an exception, the transaction
+ /// isn't considered to be started. Note also that if the zone is not
+ /// found this method doesn't try to create a new one in the database.
+ /// It must have been created by some other means beforehand.
+ ///
+ /// The second element is the internal zone ID used for subsequent
+ /// updates. Depending on implementation details of the actual derived
+ /// class method, it may be different from the one returned by
+ /// \c getZone(); for example, a specific implementation may use a
+ /// completely new zone ID when \c replace is true.
+ ///
+ /// \exception DataSourceError Duplicate call to this method, or some
+ /// internal database related error.
+ ///
+ /// \param zone_name A string representation of the zone name to be updated
+ /// \param replace Whether to replace the entire zone (see above)
+ ///
+ /// \return A pair of bool and int, indicating whether the specified zone
+ /// exists and (if so) the zone ID to be used for the update, respectively.
+ virtual std::pair<bool, int> startUpdateZone(const std::string& zone_name,
+ bool replace) = 0;
+
+ /// Add a single record to the zone to be updated.
+ ///
+ /// This method provides a simple interface to insert a new record
+ /// (a database "row") to the zone in the update context started by
+ /// \c startUpdateZone(). The zone to which the record to be added
+ /// is the one specified at the time of the \c startUpdateZone() call.
+ ///
+ /// A successful call to \c startUpdateZone() must have preceded this
+ /// call; otherwise a \c DataSourceError exception will be thrown.
+ ///
+ /// The row is defined as a vector of strings that has exactly
+ /// ADD_COLUMN_COUNT number of elements. See AddRecordColumns for
+ /// the semantics of each element.
+ ///
+ /// Derived class methods are not required to check whether the given
+ /// values in \c columns are valid in terms of the expected semantics;
+ /// in general, it's the caller's responsibility.
+ /// For example, TTLs would normally be expected to be a textual
+ /// representation of decimal numbers, but this interface doesn't require
+ /// the implementation to perform this level of validation. It may check
+ /// the values, however, and in that case if it detects an error it
+ /// should throw a \c DataSourceError exception.
+ ///
+ /// Likewise, derived class methods are not required to detect any
+ /// duplicate record that is already in the zone.
+ ///
+ /// \note The underlying database schema may not have a trivial mapping
+ /// from this style of definition of rows to actual database records.
+ /// It's the implementation's responsibility to implement the mapping
+ /// in the actual derived method.
+ ///
+ /// \exception DataSourceError Invalid call without starting a transaction,
+ /// or other internal database error.
+ ///
+ /// \param columns An array of strings that defines a record to be added
+ /// to the zone.
+ virtual void addRecordToZone(
+ const std::string (&columns)[ADD_COLUMN_COUNT]) = 0;
+
+ /// Delete a single record from the zone to be updated.
+ ///
+ /// This method provides a simple interface to delete a record
+ /// (a database "row") from the zone in the update context started by
+ /// \c startUpdateZone(). The zone from which the record to be deleted
+ /// is the one specified at the time of the \c startUpdateZone() call.
+ ///
+ /// A successful call to \c startUpdateZone() must have preceded this
+ /// call; otherwise a \c DataSourceError exception will be thrown.
+ ///
+ /// The record to be deleted is specified by a vector of strings that has
+ /// exactly DEL_PARAM_COUNT number of elements. See DeleteRecordParams
+ /// for the semantics of each element.
+ ///
+ /// \note In IXFR, TTL may also be specified, but we intentionally
+ /// ignore that in this interface, because it's not guaranteed
+ /// that all records have the same TTL (unlike the RRset
+ /// assumption) and there can even be multiple records for the
+ /// same name, type and rdata with different TTLs. If we only
+ /// delete one of them, subsequent lookup will still return a
+ /// positive answer, which would be confusing. It's a higher
+ /// layer's responsibility to check if there is at least one
+ /// record in the database that has the given TTL.
+ ///
+ /// Like \c addRecordToZone, derived class methods are not required to
+ /// validate the semantics of the given parameters or to check if there
+ /// is a record that matches the specified parameter; if there isn't,
+ /// it simply ignores the result.
+ ///
+ /// \exception DataSourceError Invalid call without starting a transaction,
+ /// or other internal database error.
+ ///
+ /// \param params An array of strings that defines a record to be deleted
+ /// from the zone.
+ virtual void deleteRecordInZone(
+ const std::string (¶ms)[DEL_PARAM_COUNT]) = 0;
- /// The number of fields the columns array passed to getNextRecord should have
- static const size_t COLUMN_COUNT = 4;
+ /// Commit updates to the zone.
+ ///
+ /// This method completes a transaction of making updates to the zone
+ /// in the context started by startUpdateZone.
+ ///
+ /// A successful call to \c startUpdateZone() must have preceded
+ /// this call; otherwise a \c DataSourceError exception will be thrown.
+ /// Once this method successfully completes, the transaction isn't
+ /// considered to exist any more. So a new transaction can now be
+ /// started. On the other hand, a duplicate call to this method after
+ /// a successful completion of it is invalid and should result in
+ /// a \c DataSourceError exception.
+ ///
+ /// If some internal database error happens, a \c DataSourceError
+ /// exception must be thrown. In that case the transaction is still
+ /// considered to be valid; the caller must explicitly roll it back
+ /// or (if it's confident that the error is temporary) try to commit it
+ /// again.
+ ///
+ /// \exception DataSourceError Call without a transaction, duplicate call
+ /// to the method or internal database error.
+ virtual void commitUpdateZone() = 0;
+
+ /// Rollback updates to the zone made so far.
+ ///
+ /// This method rolls back a transaction of making updates to the zone
+ /// in the context started by startUpdateZone. When it succeeds
+ /// (it normally should, but see below), the underlying database should
+ /// be reverted to the point before performing the corresponding
+ /// \c startUpdateZone().
+ ///
+ /// A successful call to \c startUpdateZone() must have preceded
+ /// this call; otherwise a \c DataSourceError exception will be thrown.
+ /// Once this method successfully completes, the transaction isn't
+ /// considered to exist any more. So a new transaction can now be
+ /// started. On the other hand, a duplicate call to this method after
+ /// a successful completion of it is invalid and should result in
+ /// a \c DataSourceError exception.
+ ///
+ /// Normally this method should not fail. But it may not always be
+ /// possible to guarantee it depending on the characteristics of the
+ /// underlying database system. So this interface doesn't require the
+ /// actual implementation for the error free property. But if a specific
+ /// implementation of this method can fail, it is encouraged to document
+ /// when that can happen with its implication.
+ ///
+ /// \exception DataSourceError Call without a transaction, duplicate call
+ /// to the method or internal database error.
+ virtual void rollbackUpdateZone() = 0;
+
+ /// Clone the accessor with the same configuration.
+ ///
+ /// Each derived class implementation of this method will create a new
+ /// accessor of the same derived class with the same configuration
+ /// (such as the database server address) as that of the caller object
+ /// and return it.
+ ///
+ /// Note that other internal states won't be copied to the new accessor
+ /// even though the name of "clone" may indicate so. For example, even
+ /// if the calling accessor is in the middle of an update transaction,
+ /// the new accessor will not start a transaction to trace the same
+ /// updates.
+ ///
+ /// The intended use case of cloning is to create a separate context
+ /// where a specific set of database operations can be performed
+ /// independently from the original accessor. The updater will use it
+ /// so that multiple updaters can be created concurrently even if the
+ /// underlying database system doesn't allow running multiple transactions
+ /// in a single database connection.
+ ///
+ /// The underlying database system may not support the functionality
+ /// that would be needed to implement this method. For example, it
+ /// may not allow a single thread (or process) to have more than one
+ /// database connections. In such a case the derived class implementation
+ /// should throw a \c DataSourceError exception.
+ ///
+ /// \return A shared pointer to the cloned accessor.
+ virtual boost::shared_ptr<DatabaseAccessor> clone() = 0;
/**
 * \brief Returns a string identifying this database backend
@@ -162,6 +474,34 @@ public:
* \return the name of the database
*/
virtual const std::string& getDBName() const = 0;
+
+ /**
+ * \brief It returns the previous name in DNSSEC order.
+ *
+ * This is used in DatabaseClient::findPreviousName and does more
+ * or less the real work, except for working on strings.
+ *
+ * \param rname The name to ask for previous of, in reversed form.
+ * We use the reversed form (see isc::dns::Name::reverse),
+ * because then the case insensitive order of string representation
+ * and the DNSSEC order correspond (eg. org.example.a is followed
+ * by org.example.a.b which is followed by org.example.b, etc).
+ * \param zone_id The zone to look through.
+ * \return The previous name.
+ * \note This function must return previous name even in case
+ * the queried rname does not exist in the zone.
+ * \note This method must skip under-the-zone-cut data (glue data).
+ * This might be implemented by looking for NSEC records (as glue
+ * data don't have them) in the zone or in some other way.
+ *
+ * \throw DataSourceError if there's a problem with the database.
+ * \throw NotImplemented if this database doesn't support DNSSEC
+ * or there's no previous name for the queried one (the NSECs
+ * might be missing or the queried name is less than or equal to the
+ * apex of the zone).
+ */
+ virtual std::string findPreviousName(int zone_id,
+ const std::string& rname) const = 0;
};
/**
@@ -183,16 +523,19 @@ public:
/**
* \brief Constructor
*
- * It initializes the client with a database.
+ * It initializes the client with a database via the given accessor.
*
- * \exception isc::InvalidParameter if database is NULL. It might throw
+ * \exception isc::InvalidParameter if accessor is NULL. It might throw
* standard allocation exception as well, but doesn't throw anything else.
*
- * \param database The database to use to get data. As the parameter
- * suggests, the client takes ownership of the database and will
- * delete it when itself deleted.
+ * \param rrclass The RR class of the zones that this client will handle.
+ * \param accessor The accessor to the database to use to get data.
+ * As the parameter suggests, the client takes ownership of the accessor
+ * and will delete it when itself deleted.
*/
- DatabaseClient(boost::shared_ptr<DatabaseAccessor> database);
+ DatabaseClient(isc::dns::RRClass rrclass,
+ boost::shared_ptr<DatabaseAccessor> accessor);
+
/**
* \brief Corresponding ZoneFinder implementation
*
@@ -218,8 +561,12 @@ public:
* \param zone_id The zone ID which was returned from
* DatabaseAccessor::getZone and which will be passed to further
* calls to the database.
+ * \param origin The name of the origin of this zone. It could query
+ * it from database, but as the DatabaseClient just searched for
+ * the zone using the name, it should have it.
*/
- Finder(boost::shared_ptr<DatabaseAccessor> database, int zone_id);
+ Finder(boost::shared_ptr<DatabaseAccessor> database, int zone_id,
+ const isc::dns::Name& origin);
// The following three methods are just implementations of inherited
// ZoneFinder's pure virtual methods.
virtual isc::dns::Name getOrigin() const;
@@ -262,7 +609,8 @@ public:
* \param name The name to find
* \param type The RRType to find
* \param target Unused at this moment
- * \param options Unused at this moment
+ * \param options Options about how to search.
+ * See ZoneFinder::FindOptions.
*/
virtual FindResult find(const isc::dns::Name& name,
const isc::dns::RRType& type,
@@ -270,6 +618,12 @@ public:
const FindOptions options = FIND_DEFAULT);
/**
+ * \brief Implementation of ZoneFinder::findPreviousName method.
+ */
+ virtual isc::dns::Name findPreviousName(const isc::dns::Name& query)
+ const;
+
+ /**
* \brief The zone ID
*
* This function provides the stored zone ID as passed to the
@@ -277,20 +631,85 @@ public:
* applications shouldn't need it.
*/
int zone_id() const { return (zone_id_); }
+
/**
- * \brief The database.
+ * \brief The database accessor.
*
- * This function provides the database stored inside as
+ * This function provides the database accessor stored inside as
* passed to the constructor. This is meant for testing purposes and
* normal applications shouldn't need it.
*/
- const DatabaseAccessor& database() const {
- return (*database_);
+ const DatabaseAccessor& getAccessor() const {
+ return (*accessor_);
}
private:
- boost::shared_ptr<DatabaseAccessor> database_;
+ boost::shared_ptr<DatabaseAccessor> accessor_;
const int zone_id_;
+ const isc::dns::Name origin_;
+ //
+ /// \brief Shortcut name for the result of getRRsets
+ typedef std::pair<bool, std::map<dns::RRType, dns::RRsetPtr> >
+ FoundRRsets;
+ /// \brief Just shortcut for set of types
+ typedef std::set<dns::RRType> WantedTypes;
+ /**
+ * \brief Searches database for RRsets of one domain.
+ *
+ * This method scans RRs of single domain specified by name and
+ * extracts any RRsets found and requested by parameters.
+ *
+ * It is used internally by find(), because it is called multiple
+ * times (usually with different domains).
+ *
+ * \param name Which domain name should be scanned.
+ * \param types List of types the caller is interested in.
+ * \param check_ns If this is set to true, it checks nothing lives
+ * together with NS record (with few little exceptions, like RRSIG
+ * or NSEC). This check is meant for non-apex NS records.
+ * \param construct_name If this is NULL, the resulting RRsets have
+ * their name set to name. If it is not NULL, it overrides the name
+ * and uses this one (this can be used for wildcard synthesized
+ * records).
+ * \return A pair, where the first element indicates if the domain
+ * contains any RRs at all (not only the requested, it may happen
+ * this is set to true, but the second part is empty). The second
+ * part is map from RRtypes to RRsets of the corresponding types.
+ * If the RRset is not present in DB, the RRtype is not there at
+ * all (so you'll not find NULL pointer in the result).
+ * \throw DataSourceError If there's a low-level error with the
+ * database or the database contains bad data.
+ */
+ FoundRRsets getRRsets(const std::string& name,
+ const WantedTypes& types, bool check_ns,
+ const std::string* construct_name = NULL);
+ /**
+ * \brief Checks if something lives below this domain.
+ *
+ * This looks if there's any subdomain of the given name. It can be
+ * used to test if domain is empty non-terminal.
+ *
+ * \param name The domain to check.
+ */
+ bool hasSubdomains(const std::string& name);
+
+ /**
+ * \brief Get the NSEC covering a name.
+ *
+ * This one calls findPreviousName on the given name and extracts an NSEC
+ * record on the result. It handles various error cases. The method exists
+ * to share code present at more than one location.
+ */
+ dns::RRsetPtr findNSECCover(const dns::Name& name);
+
+ /**
+ * \brief Convenience type shortcut.
+ *
+ * To find stuff in the result of getRRsets.
+ */
+ typedef std::map<dns::RRType, dns::RRsetPtr>::const_iterator
+ FoundIterator;
};
+
/**
* \brief Find a zone in the database
*
@@ -307,12 +726,45 @@ public:
*/
virtual FindResult findZone(const isc::dns::Name& name) const;
+ /**
+ * \brief Get the zone iterator
+ *
+ * The iterator allows going through the whole zone content. If the
+ * underlying DatabaseConnection is implemented correctly, it should
+ * be possible to have multiple ZoneIterators at once and query data
+ * at the same time.
+ *
+ * \exception DataSourceError if the zone doesn't exist.
+ * \exception isc::NotImplemented if the underlying DatabaseConnection
+ * doesn't implement iteration. But in case it is not implemented
+ * and the zone doesn't exist, DataSourceError is thrown.
+ * \exception Anything else the underlying DatabaseConnection might
+ * want to throw.
+ * \param name The origin of the zone to iterate.
+ * \return Shared pointer to the iterator (it will never be NULL)
+ */
+ virtual ZoneIteratorPtr getIterator(const isc::dns::Name& name) const;
+
+ /// This implementation internally clones the accessor from the one
+ /// used in the client and starts a separate transaction using the cloned
+ /// accessor. The returned updater will be able to work separately from
+ /// the original client.
+ virtual ZoneUpdaterPtr getUpdater(const isc::dns::Name& name,
+ bool replace) const;
+
private:
- /// \brief Our database.
- const boost::shared_ptr<DatabaseAccessor> database_;
+ /// \brief The RR class that this client handles.
+ const isc::dns::RRClass rrclass_;
+
+ /// \brief The accessor to our database.
+ const boost::shared_ptr<DatabaseAccessor> accessor_;
};
}
}
-#endif
+#endif // __DATABASE_DATASRC_H
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/datasrc/datasrc_messages.mes b/src/lib/datasrc/datasrc_messages.mes
index 6af4fe6..04ad610 100644
--- a/src/lib/datasrc/datasrc_messages.mes
+++ b/src/lib/datasrc/datasrc_messages.mes
@@ -63,11 +63,10 @@ The maximum allowed number of items of the hotspot cache is set to the given
number. If there are too many, some of them will be dropped. The size of 0
means no limit.
-% DATASRC_DATABASE_FIND_ERROR error retrieving data from datasource %1: %2
-This was an internal error while reading data from a datasource. This can either
-mean the specific data source implementation is not behaving correctly, or the
-data it provides is invalid. The current search is aborted.
-The error message contains specific information about the error.
+% DATASRC_DATABASE_COVER_NSEC_UNSUPPORTED %1 doesn't support DNSSEC when asked for NSEC data covering %2
+The datasource tried to provide an NSEC proof that the named domain does not
+exist, but the database backend doesn't support DNSSEC. No proof is included
+in the answer as a result.
% DATASRC_DATABASE_FIND_RECORDS looking in datasource %1 for record %2/%3
Debug information. The database data source is looking up records with the given
@@ -75,20 +74,28 @@ name and type in the database.
% DATASRC_DATABASE_FIND_TTL_MISMATCH TTL values differ in %1 for elements of %2/%3/%4, setting to %5
The datasource backend provided resource records for the given RRset with
-different TTL values. The TTL of the RRSET is set to the lowest value, which
-is printed in the log message.
+different TTL values. This isn't allowed on the wire and is considered
+an error, so we set it to the lowest value we found (but we don't modify the
+database). The data in database should be checked and fixed.
-% DATASRC_DATABASE_FIND_UNCAUGHT_ERROR uncaught general error retrieving data from datasource %1: %2
-There was an uncaught general exception while reading data from a datasource.
-This most likely points to a logic error in the code, and can be considered a
-bug. The current search is aborted. Specific information about the exception is
-printed in this error message.
+% DATASRC_DATABASE_FOUND_DELEGATION Found delegation at %2 in %1
+When searching for a domain, the program met a delegation to a different zone
+at the given domain name. It will return that one instead.
-% DATASRC_DATABASE_FIND_UNCAUGHT_ISC_ERROR uncaught error retrieving data from datasource %1: %2
-There was an uncaught ISC exception while reading data from a datasource. This
-most likely points to a logic error in the code, and can be considered a bug.
-The current search is aborted. Specific information about the exception is
-printed in this error message.
+% DATASRC_DATABASE_FOUND_DELEGATION_EXACT Found delegation at %2 (exact match) in %1
+The program found the domain requested, but it is a delegation point to a
+different zone, therefore it is not authoritative for this domain name.
+It will return the NS record instead.
+
+% DATASRC_DATABASE_FOUND_DNAME Found DNAME at %2 in %1
+When searching for a domain, the program met a DNAME redirection to a different
+place in the domain space at the given domain name. It will return that one
+instead.
+
+% DATASRC_DATABASE_FOUND_EMPTY_NONTERMINAL empty non-terminal %2 in %1
+The domain name doesn't have any RRs, so it doesn't exist in the database.
+However, it has a subdomain, so it exists in the DNS address space. So we
+return NXRRSET instead of NXDOMAIN.
% DATASRC_DATABASE_FOUND_NXDOMAIN search in datasource %1 resulted in NXDOMAIN for %2/%3/%4
The data returned by the database backend did not contain any data for the given
@@ -103,6 +110,45 @@ The data returned by the database backend contained data for the given domain
name, and it either matches the type or has a relevant type. The RRset that is
returned is printed.
+% DATASRC_DATABASE_ITERATE iterating zone %1
+The program is reading the whole zone, eg. not searching for data, but going
+through each of the RRsets there.
+
+% DATASRC_DATABASE_ITERATE_END iterating zone finished
+While iterating through the zone, the program reached end of the data.
+
+% DATASRC_DATABASE_ITERATE_NEXT next RRset in zone is %1/%2
+While iterating through the zone, the program extracted next RRset from it.
+The name and RRtype of the RRset is indicated in the message.
+
+% DATASRC_DATABASE_ITERATE_TTL_MISMATCH TTL values differ for RRs of %1/%2/%3, setting to %4
+While iterating through the zone, the time to live for RRs of the given RRset
+were found to be different. This isn't allowed on the wire and is considered
+an error, so we set it to the lowest value we found (but we don't modify the
+database). The data in database should be checked and fixed.
+
+% DATASRC_DATABASE_WILDCARD constructing RRset %3 from wildcard %2 in %1
+The database doesn't contain directly matching domain, but it does contain a
+wildcard one which is being used to synthesize the answer.
+
+% DATASRC_DATABASE_WILDCARD_CANCEL_NS canceled wildcard match on %2 because %3 contains NS in %1
+The database was queried to provide glue data and it didn't find direct match.
+It could create it from given wildcard, but matching wildcards is forbidden
+under a zone cut, which was found. Therefore the delegation will be returned
+instead.
+
+% DATASRC_DATABASE_WILDCARD_CANCEL_SUB wildcard %2 can't be used to construct %3 because %4 exists in %1
+The answer could be constructed using the wildcard, but the given subdomain
+exists, therefore this name is something like empty non-terminal (actually,
+from the protocol point of view, it is empty non-terminal, but the code
+discovers it differently).
+
+% DATASRC_DATABASE_WILDCARD_EMPTY implicit wildcard %2 used to construct %3 in %1
+The given wildcard exists implicitly in the domain space, as an empty nonterminal
+(eg. there's something like subdomain.*.example.org, so *.example.org exists
+implicitly, but is empty). This will produce NXRRSET, because the constructed
+domain is empty as well as the wildcard.
+
% DATASRC_DO_QUERY handling query for '%1/%2'
A debug message indicating that a query for the given name and RR type is being
processed.
@@ -549,3 +595,38 @@ data source.
% DATASRC_UNEXPECTED_QUERY_STATE unexpected query state
This indicates a programming error. An internal task of unknown type was
generated.
+
+% DATASRC_DATABASE_UPDATER_CREATED zone updater created for '%1/%2' on %3
+Debug information. A zone updater object is created to make updates to
+the shown zone on the shown backend database.
+
+% DATASRC_DATABASE_UPDATER_DESTROYED zone updater destroyed for '%1/%2' on %3
+Debug information. A zone updater object is destroyed, either successfully
+or after failure of making updates to the shown zone on the shown backend
+database.
+
+% DATASRC_DATABASE_UPDATER_ROLLBACK zone updates roll-backed for '%1/%2' on %3
+A zone updater is being destroyed without committing the changes.
+This would typically mean the update attempt was aborted due to some
+error, but may also be a bug of the application that forgets committing
+the changes. The intermediate changes made through the updater won't
+be applied to the underlying database. The zone name, its class, and
+the underlying database name are shown in the log message.
+
+% DATASRC_DATABASE_UPDATER_ROLLBACKFAIL failed to roll back zone updates for '%1/%2' on %3: %4
+A zone updater is being destroyed without committing the changes to
+the database, and attempts to rollback incomplete updates, but it
+unexpectedly fails. The higher level implementation does not expect
+it to fail, so this means either a serious operational error in the
+underlying data source (such as a system failure of a database) or
+software bug in the underlying data source implementation. In either
+case if this message is logged the administrator should carefully
+examine the underlying data source to see what exactly happens and
+whether the data is still valid. The zone name, its class, and the
+underlying database name as well as the error message thrown from the
+database module are shown in the log message.
+
+% DATASRC_DATABASE_UPDATER_COMMIT updates committed for '%1/%2' on %3
+Debug information. A set of updates to a zone has been successfully
+committed to the corresponding database backend. The zone name,
+its class and the database name are printed.
diff --git a/src/lib/datasrc/factory.cc b/src/lib/datasrc/factory.cc
new file mode 100644
index 0000000..eddd4f4
--- /dev/null
+++ b/src/lib/datasrc/factory.cc
@@ -0,0 +1,82 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include "factory.h"
+
+#include "data_source.h"
+#include "database.h"
+#include "sqlite3_accessor.h"
+#include "memory_datasrc.h"
+
+#include <datasrc/logger.h>
+
+#include <dlfcn.h>
+
+using namespace isc::data;
+using namespace isc::datasrc;
+
+namespace isc {
+namespace datasrc {
+
+LibraryContainer::LibraryContainer(const std::string& name) {
+ ds_lib_ = dlopen(name.c_str(), RTLD_NOW | RTLD_LOCAL);
+ if (ds_lib_ == NULL) {
+ isc_throw(DataSourceLibraryError, dlerror());
+ }
+}
+
+LibraryContainer::~LibraryContainer() {
+ dlclose(ds_lib_);
+}
+
+void*
+LibraryContainer::getSym(const char* name) {
+ // Since dlsym can return NULL on success, we check for errors by
+ // first clearing any existing errors with dlerror(), then calling dlsym,
+ // and finally checking for errors with dlerror()
+ dlerror();
+
+ void *sym = dlsym(ds_lib_, name);
+
+ const char* dlsym_error = dlerror();
+ if (dlsym_error != NULL) {
+ isc_throw(DataSourceLibrarySymbolError, dlsym_error);
+ }
+
+ return (sym);
+}
+
+DataSourceClientContainer::DataSourceClientContainer(const std::string& type,
+ ConstElementPtr config)
+: ds_lib_(type + "_ds.so")
+{
+ // We are casting from a data to a function pointer here
+ // Some compilers (rightfully) complain about that, but
+ // c-style casts are accepted the most here. If we run
+ // into any that also don't like this, we might need to
+ // use some form of union cast or memory copy to get
+ // from the void* to the function pointer.
+ ds_creator* ds_create = (ds_creator*)ds_lib_.getSym("createInstance");
+ destructor_ = (ds_destructor*)ds_lib_.getSym("destroyInstance");
+
+ instance_ = ds_create(config);
+}
+
+DataSourceClientContainer::~DataSourceClientContainer() {
+ destructor_(instance_);
+}
+
+} // end namespace datasrc
+} // end namespace isc
+
diff --git a/src/lib/datasrc/factory.h b/src/lib/datasrc/factory.h
new file mode 100644
index 0000000..8db9ec9
--- /dev/null
+++ b/src/lib/datasrc/factory.h
@@ -0,0 +1,182 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __DATA_SOURCE_FACTORY_H
+#define __DATA_SOURCE_FACTORY_H 1
+
+#include <boost/noncopyable.hpp>
+
+#include <datasrc/data_source.h>
+#include <datasrc/client.h>
+#include <exceptions/exceptions.h>
+
+#include <cc/data.h>
+
+namespace isc {
+namespace datasrc {
+
+
+/// \brief Raised if there is an error loading the datasource implementation
+/// library
+class DataSourceLibraryError : public DataSourceError {
+public:
+ DataSourceLibraryError(const char* file, size_t line, const char* what) :
+ DataSourceError(file, line, what) {}
+};
+
+/// \brief Raised if there is an error reading a symbol from the datasource
+/// implementation library
+class DataSourceLibrarySymbolError : public DataSourceError {
+public:
+ DataSourceLibrarySymbolError(const char* file, size_t line,
+ const char* what) :
+ DataSourceError(file, line, what) {}
+};
+
+/// \brief Raised if the given config contains bad data
+///
+/// Depending on the datasource type, the configuration may differ (for
+/// instance, the sqlite3 datasource needs a database file).
+class DataSourceConfigError : public DataSourceError {
+public:
+ DataSourceConfigError(const char* file, size_t line, const char* what) :
+ DataSourceError(file, line, what) {}
+ // This exception is created in the dynamic modules. Apparently
+ // sunstudio can't handle it if we then automatically derive the
+ // destructor, so we provide it explicitly
+ ~DataSourceConfigError() throw() {}
+};
+
+typedef DataSourceClient* ds_creator(isc::data::ConstElementPtr config);
+typedef void ds_destructor(DataSourceClient* instance);
+
+/// \brief Container class for dynamically loaded libraries
+///
+/// This class is used to dlopen() a library, provides access to dlsym(),
+/// and cleans up the dlopened library when the instance of this class is
+/// destroyed.
+///
+/// Its main function is to provide RAII-style access to dlopen'ed libraries.
+///
+/// \note Currently it is Datasource-backend specific. If we have need for this
+/// in other places than for dynamically loading datasources, then, apart
+/// from moving it to another location, we also need to make the
+/// exceptions raised more general.
+class LibraryContainer : boost::noncopyable {
+public:
+ /// \brief Constructor
+ ///
+ /// \param name The name of the library (.so) file. This file must be in
+ /// the library path.
+ ///
+ /// \exception DataSourceLibraryError If the library cannot be found or
+ /// cannot be loaded.
+ LibraryContainer(const std::string& name);
+
+ /// \brief Destructor
+ ///
+ /// Cleans up the library by calling dlclose()
+ ~LibraryContainer();
+
+ /// \brief Retrieve a symbol
+ ///
+ /// This retrieves a symbol from the loaded library.
+ ///
+ /// \exception DataSourceLibrarySymbolError if the symbol cannot be found,
+ /// or if another error (as reported by dlerror()) occurs.
+ ///
+ /// \param name The name of the symbol to retrieve
+ /// \return A pointer to the symbol. This may be NULL, and if so, indicates
+ /// the symbol does indeed exist, but has the value NULL itself.
+ /// If the symbol does not exist, a DataSourceLibrarySymbolError is
+ /// raised.
+ ///
+ /// \note The argument is a const char* (and not a std::string like the
+ /// argument in the constructor). This argument is always a fixed
+ /// string in the code, while the other can be read from
+ /// configuration, and needs modification
+ void* getSym(const char* name);
+private:
+ /// Pointer to the dynamically loaded library structure
+ void *ds_lib_;
+};
+
+
+/// \brief Container for a specific instance of a dynamically loaded
+/// DataSourceClient implementation
+///
+/// Given a datasource type and a type-specific set of configuration data,
+/// the corresponding dynamic library is loaded (if it hadn't been already),
+/// and an instance is created. This instance is stored within this structure,
+/// and can be accessed through getInstance(). Upon destruction of this
+/// container, the stored instance of the DataSourceClient is deleted with
+/// the destructor function provided by the loaded library.
+///
+/// The 'type' is actually the name of the library, minus the '_ds.so' postfix
+/// Datasource implementation libraries therefore have a fixed name, both for
+/// easy recognition and to reduce potential mistakes.
+/// For example, the sqlite3 implementation has the type 'sqlite3', and the
+/// derived filename 'sqlite3_ds.so'
+///
+/// There are of course some demands to an implementation, not all of which
+/// can be verified compile-time. It must provide a creator and destructor
+/// functions. The creator function must return an instance of a subclass of
+/// DataSourceClient. The prototypes of these functions are as follows:
+/// \code
+/// extern "C" DataSourceClient* createInstance(isc::data::ConstElementPtr cfg);
+///
+/// extern "C" void destroyInstance(isc::data::DataSourceClient* instance);
+/// \endcode
+class DataSourceClientContainer : boost::noncopyable {
+public:
+ /// \brief Constructor
+ ///
+ /// \exception DataSourceLibraryError if there is an error loading the
+ /// backend library
+ /// \exception DataSourceLibrarySymbolError if the library does not have
+ /// the needed symbols, or if there is an error reading them
+ /// \exception DataSourceConfigError if the given config is not correct
+ /// for the given type
+ ///
+ /// \param type The type of the datasource client. Based on the value of
+ /// type, a specific backend library is used, by appending the
+ /// string '_ds.so' to the given type, and loading that as the
+ /// implementation library
+ /// \param config Type-specific configuration data, see the documentation
+ /// of the datasource backend type for information on what
+ /// configuration data to pass.
+ DataSourceClientContainer(const std::string& type,
+ isc::data::ConstElementPtr config);
+
+ /// \brief Destructor
+ ~DataSourceClientContainer();
+
+ /// \brief Accessor to the instance
+ ///
+ /// \return Reference to the DataSourceClient instance contained in this
+ /// container.
+ DataSourceClient& getInstance() { return *instance_; }
+
+private:
+ DataSourceClient* instance_;
+ ds_destructor* destructor_;
+ LibraryContainer ds_lib_;
+};
+
+} // end namespace datasrc
+} // end namespace isc
+#endif // __DATA_SOURCE_FACTORY_H
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/datasrc/iterator.h b/src/lib/datasrc/iterator.h
new file mode 100644
index 0000000..0102fcb
--- /dev/null
+++ b/src/lib/datasrc/iterator.h
@@ -0,0 +1,61 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <dns/rrset.h>
+
+#include <boost/noncopyable.hpp>
+
+namespace isc {
+namespace datasrc {
+
+/**
+ * \brief Read-only iterator to a zone.
+ *
+ * You can get an instance of (a descendant of) ZoneIterator from the
+ * DataSourceClient::getIterator() method. The actual concrete implementation
+ * will be different depending on the actual data source used. This is the
+ * abstract interface.
+ *
+ * There is no way to rewind the iterator or restart iteration from the beginning.
+ */
+class ZoneIterator : public boost::noncopyable {
+public:
+ /**
+ * \brief Destructor
+ *
+ * Virtual destructor. It is empty, but ensures the right destructor from
+ * descendant is called.
+ */
+ virtual ~ ZoneIterator() { }
+
+ /**
+ * \brief Get next RRset from the zone.
+ *
+ * This returns the next RRset in the zone as a shared pointer. The
+ * shared pointer is used to allow both accessing in-memory data and
+ * automatic memory management.
+ *
+ * No particular order of the returned RRsets is guaranteed.
+ *
+ * While this can potentially throw anything (including standard allocation
+ * errors), it should be rare.
+ *
+ * \return Pointer to the next RRset or NULL pointer when the iteration
+ * gets to the end of the zone.
+ */
+ virtual isc::dns::ConstRRsetPtr getNextRRset() = 0;
+};
+
+}
+}
diff --git a/src/lib/datasrc/memory_datasrc.cc b/src/lib/datasrc/memory_datasrc.cc
index d06cd9b..4c9e53f 100644
--- a/src/lib/datasrc/memory_datasrc.cc
+++ b/src/lib/datasrc/memory_datasrc.cc
@@ -16,6 +16,9 @@
#include <cassert>
#include <boost/shared_ptr.hpp>
#include <boost/bind.hpp>
+#include <boost/foreach.hpp>
+
+#include <exceptions/exceptions.h>
#include <dns/name.h>
#include <dns/rrclass.h>
@@ -25,13 +28,40 @@
#include <datasrc/memory_datasrc.h>
#include <datasrc/rbtree.h>
#include <datasrc/logger.h>
+#include <datasrc/iterator.h>
+#include <datasrc/data_source.h>
+#include <datasrc/factory.h>
+
+#include <cc/data.h>
using namespace std;
using namespace isc::dns;
+using namespace isc::data;
namespace isc {
namespace datasrc {
+namespace {
+// Some type aliases
+/*
+ * Each domain consists of some RRsets. They will be looked up by the
+ * RRType.
+ *
+ * The use of map is questionable with regard to performance - there'll
+ * be usually only few RRsets in the domain, so the log n benefit isn't
+ * much and a vector/array might be faster due to its simplicity and
+ * continuous memory location. But this is unlikely to be a performance
+ * critical place and map has better interface for the lookups, so we use
+ * that.
+ */
+typedef map<RRType, ConstRRsetPtr> Domain;
+typedef Domain::value_type DomainPair;
+typedef boost::shared_ptr<Domain> DomainPtr;
+// The tree stores domains
+typedef RBTree<Domain> DomainTree;
+typedef RBNode<Domain> DomainNode;
+}
+
// Private data and hidden methods of InMemoryZoneFinder
struct InMemoryZoneFinder::InMemoryZoneFinderImpl {
// Constructor
@@ -44,25 +74,6 @@ struct InMemoryZoneFinder::InMemoryZoneFinderImpl {
DomainPtr origin_domain(new Domain);
origin_data_->setData(origin_domain);
}
-
- // Some type aliases
- /*
- * Each domain consists of some RRsets. They will be looked up by the
- * RRType.
- *
- * The use of map is questionable with regard to performance - there'll
- * be usually only few RRsets in the domain, so the log n benefit isn't
- * much and a vector/array might be faster due to its simplicity and
- * continuous memory location. But this is unlikely to be a performance
- * critical place and map has better interface for the lookups, so we use
- * that.
- */
- typedef map<RRType, ConstRRsetPtr> Domain;
- typedef Domain::value_type DomainPair;
- typedef boost::shared_ptr<Domain> DomainPtr;
- // The tree stores domains
- typedef RBTree<Domain> DomainTree;
- typedef RBNode<Domain> DomainNode;
static const DomainNode::Flags DOMAINFLAG_WILD = DomainNode::FLAG_USER1;
// Information about the zone
@@ -634,7 +645,7 @@ InMemoryZoneFinder::load(const string& filename) {
LOG_DEBUG(logger, DBG_TRACE_BASIC, DATASRC_MEM_LOAD).arg(getOrigin()).
arg(filename);
// Load it into a temporary tree
- InMemoryZoneFinderImpl::DomainTree tmp;
+ DomainTree tmp;
masterLoad(filename.c_str(), getOrigin(), getClass(),
boost::bind(&InMemoryZoneFinderImpl::addFromLoad, impl_, _1, &tmp));
// If it went well, put it inside
@@ -655,6 +666,12 @@ InMemoryZoneFinder::getFileName() const {
return (impl_->file_name_);
}
+isc::dns::Name
+InMemoryZoneFinder::findPreviousName(const isc::dns::Name&) const {
+ isc_throw(NotImplemented, "InMemory data source doesn't support DNSSEC "
+ "yet, can't find previous name");
+}
+
/// Implementation details for \c InMemoryClient hidden from the public
/// interface.
///
@@ -700,8 +717,232 @@ InMemoryClient::addZone(ZoneFinderPtr zone_finder) {
InMemoryClient::FindResult
InMemoryClient::findZone(const isc::dns::Name& name) const {
LOG_DEBUG(logger, DBG_TRACE_DATA, DATASRC_MEM_FIND_ZONE).arg(name);
- return (FindResult(impl_->zone_table.findZone(name).code,
- impl_->zone_table.findZone(name).zone));
+ ZoneTable::FindResult result(impl_->zone_table.findZone(name));
+ return (FindResult(result.code, result.zone));
+}
+
+namespace {
+
+class MemoryIterator : public ZoneIterator {
+private:
+ RBTreeNodeChain<Domain> chain_;
+ Domain::const_iterator dom_iterator_;
+ const DomainTree& tree_;
+ const DomainNode* node_;
+ bool ready_;
+public:
+ MemoryIterator(const DomainTree& tree, const Name& origin) :
+ tree_(tree),
+ ready_(true)
+ {
+ // Find the first node (origin) and preserve the node chain for future
+ // searches
+ DomainTree::Result result(tree_.find<void*>(origin, &node_, chain_,
+ NULL, NULL));
+ // It can't happen that the origin is not in there
+ if (result != DomainTree::EXACTMATCH) {
+ isc_throw(Unexpected,
+ "In-memory zone corrupted, missing origin node");
+ }
+ // Initialize the iterator if there's somewhere to point to
+ if (node_ != NULL && node_->getData() != DomainPtr()) {
+ dom_iterator_ = node_->getData()->begin();
+ }
+ }
+
+ virtual ConstRRsetPtr getNextRRset() {
+ if (!ready_) {
+ isc_throw(Unexpected, "Iterating past the zone end");
+ }
+ /*
+ * This cycle finds the first nonempty node with yet unused RRset.
+ * If it is NULL, we run out of nodes. If it is empty, it doesn't
+ * contain any RRsets. If we are at the end, just get to next one.
+ */
+ while (node_ != NULL && (node_->getData() == DomainPtr() ||
+ dom_iterator_ == node_->getData()->end())) {
+ node_ = tree_.nextNode(chain_);
+ // If there's a node, initialize the iterator and check next time
+ // if the map is empty or not
+ if (node_ != NULL && node_->getData() != NULL) {
+ dom_iterator_ = node_->getData()->begin();
+ }
+ }
+ if (node_ == NULL) {
+ // That's all, folks
+ ready_ = false;
+ return (ConstRRsetPtr());
+ }
+ // The iterator points to the next yet unused RRset now
+ ConstRRsetPtr result(dom_iterator_->second);
+ // This one is used, move it to the next time for next call
+ ++dom_iterator_;
+
+ return (result);
+ }
+};
+
+} // End of anonymous namespace
+
+ZoneIteratorPtr
+InMemoryClient::getIterator(const Name& name) const {
+ ZoneTable::FindResult result(impl_->zone_table.findZone(name));
+ if (result.code != result::SUCCESS) {
+ isc_throw(DataSourceError, "No such zone: " + name.toText());
+ }
+
+ const InMemoryZoneFinder*
+ zone(dynamic_cast<const InMemoryZoneFinder*>(result.zone.get()));
+ if (zone == NULL) {
+ /*
+ * TODO: This can happen only during some of the tests and only as
+ * a temporary solution. This should be fixed by #1159 and then
+ * this cast and check shouldn't be necessary. We don't have
+ * test for handling a "can not happen" condition.
+ */
+ isc_throw(Unexpected, "The zone at " + name.toText() +
+ " is not InMemoryZoneFinder");
+ }
+ return (ZoneIteratorPtr(new MemoryIterator(zone->impl_->domains_, name)));
+}
+
+ZoneUpdaterPtr
+InMemoryClient::getUpdater(const isc::dns::Name&, bool) const {
+ isc_throw(isc::NotImplemented, "Update attempt on in memory data source");
+}
+
+
+namespace {
+// convenience function to append an error message to a list of errors
+// (TODO: move functions like these to some util lib?)
+void
+addError(ElementPtr errors, const std::string& error) {
+ if (errors != ElementPtr() && errors->getType() == Element::list) {
+ errors->add(Element::create(error));
+ }
+}
+
+/// Check if the given element exists in the map, and if it is a string
+bool
+checkConfigElementString(ConstElementPtr config, const std::string& name,
+ ElementPtr errors)
+{
+ if (!config->contains(name)) {
+ addError(errors,
+ "Config for memory backend does not contain a '"
+ "type"
+ "' value");
+ return false;
+ } else if (!config->get(name) ||
+ config->get(name)->getType() != Element::string) {
+ addError(errors, "value of " + name +
+ " in memory backend config is not a string");
+ return false;
+ } else {
+ return true;
+ }
+}
+
+bool
+checkZoneConfig(ConstElementPtr config, ElementPtr errors) {
+ bool result = true;
+ if (!config || config->getType() != Element::map) {
+ addError(errors, "Elements in memory backend's zone list must be maps");
+ result = false;
+ } else {
+ if (!checkConfigElementString(config, "origin", errors)) {
+ result = false;
+ }
+ if (!checkConfigElementString(config, "file", errors)) {
+ result = false;
+ }
+ // we could add some existence/readability/parsability checks here
+ // if we want
+ }
+ return result;
+}
+
+bool
+checkConfig(ConstElementPtr config, ElementPtr errors) {
+ /* Specific configuration is under discussion, right now this accepts
+ * the 'old' configuration, see [TODO]
+ * So for memory datasource, we get a structure like this:
+ * { "type": string ("memory"),
+ * "class": string ("IN"/"CH"/etc),
+ * "zones": list
+ * }
+ * Zones list is a list of maps:
+ * { "origin": string,
+ * "file": string
+ * }
+ *
+ * At this moment we cannot be completely sure of the contents of the
+ * structure, so we have to do some more extensive tests than should
+ * strictly be necessary (e.g. existence and type of elements)
+ */
+ bool result = true;
+
+ if (!config || config->getType() != Element::map) {
+ addError(errors, "Base config for memory backend must be a map");
+ result = false;
+ } else {
+ if (!checkConfigElementString(config, "type", errors)) {
+ result = false;
+ } else {
+ if (config->get("type")->stringValue() != "memory") {
+ addError(errors,
+ "Config for memory backend is not of type \"memory\"");
+ result = false;
+ }
+ }
+ if (!checkConfigElementString(config, "class", errors)) {
+ result = false;
+ } else {
+ try {
+ RRClass rrc(config->get("class")->stringValue());
+ } catch (const isc::Exception& rrce) {
+ addError(errors,
+ "Error parsing class config for memory backend: " +
+ std::string(rrce.what()));
+ result = false;
+ }
+ }
+ if (!config->contains("zones")) {
+ addError(errors, "No 'zones' element in memory backend config");
+ result = false;
+ } else if (!config->get("zones") ||
+ config->get("zones")->getType() != Element::list) {
+ addError(errors, "'zones' element in memory backend config is not a list");
+ result = false;
+ } else {
+ BOOST_FOREACH(ConstElementPtr zone_config,
+ config->get("zones")->listValue()) {
+ if (!checkZoneConfig(zone_config, errors)) {
+ result = false;
+ }
+ }
+ }
+ }
+
+ return (result);
+ return true;
+}
+
+} // end anonymous namespace
+
+DataSourceClient *
+createInstance(isc::data::ConstElementPtr config) {
+ ElementPtr errors(Element::createList());
+ if (!checkConfig(config, errors)) {
+ isc_throw(DataSourceConfigError, errors->str());
+ }
+ return (new InMemoryClient());
}
+
+void destroyInstance(DataSourceClient* instance) {
+ delete instance;
+}
+
+
} // end of namespace datasrc
-} // end of namespace dns
+} // end of namespace isc
diff --git a/src/lib/datasrc/memory_datasrc.h b/src/lib/datasrc/memory_datasrc.h
index 0234a91..cf467a2 100644
--- a/src/lib/datasrc/memory_datasrc.h
+++ b/src/lib/datasrc/memory_datasrc.h
@@ -22,6 +22,8 @@
#include <datasrc/zonetable.h>
#include <datasrc/client.h>
+#include <cc/data.h>
+
namespace isc {
namespace dns {
class Name;
@@ -75,6 +77,12 @@ public:
isc::dns::RRsetList* target = NULL,
const FindOptions options = FIND_DEFAULT);
+ /// \brief Implementation of the ZoneFinder::findPreviousName method
+ ///
+ /// This one throws NotImplemented exception, as InMemory doesn't
+ /// support DNSSEC currently.
+ virtual isc::dns::Name findPreviousName(const isc::dns::Name& query) const;
+
/// \brief Inserts an rrset into the zone.
///
/// It puts another RRset into the zone.
@@ -182,6 +190,11 @@ private:
struct InMemoryZoneFinderImpl;
InMemoryZoneFinderImpl* impl_;
//@}
+ // The friend here is for InMemoryClient::getIterator. The iterator
+ // needs to access the data inside the zone, so the InMemoryClient
+ // extracts the pointer to data and puts it into the iterator.
+ // The access is read only.
+ friend class InMemoryClient;
};
/// \brief A data source client that holds all necessary data in memory.
@@ -208,7 +221,7 @@ private:
/// while it wouldn't be safe to delete unnecessary zones inside the dedicated
/// backend.
///
-/// The findZone() method takes a domain name and returns the best matching
+/// The findZone() method takes a domain name and returns the best matching
/// \c InMemoryZoneFinder in the form of (Boost) shared pointer, so that it can
/// provide the general interface for all data sources.
class InMemoryClient : public DataSourceClient {
@@ -258,12 +271,51 @@ public:
/// For other details see \c DataSourceClient::findZone().
virtual FindResult findZone(const isc::dns::Name& name) const;
+ /// \brief Implementation of the getIterator method
+ virtual ZoneIteratorPtr getIterator(const isc::dns::Name& name) const;
+
+ /// In-memory data source is read-only, so this derived method will
+ /// result in a NotImplemented exception.
+ ///
+ /// \note We plan to use a database-based data source as a backend
+ /// persistent storage for an in-memory data source. When it's
+ /// implemented we may also want to allow the user of the in-memory client
+ /// to update via its updater (this may or may not be a good idea and
+ /// is subject to further discussions).
+ virtual ZoneUpdaterPtr getUpdater(const isc::dns::Name& name,
+ bool replace) const;
+
private:
// TODO: Do we still need the PImpl if nobody should manipulate this class
// directly any more (it should be handled through DataSourceClient)?
class InMemoryClientImpl;
InMemoryClientImpl* impl_;
};
+
+/// \brief Creates an instance of the Memory datasource client
+///
+/// Currently the configuration passed here must be a MapElement, formed as
+/// follows:
+/// \code
+/// { "type": string ("memory"),
+/// "class": string ("IN"/"CH"/etc),
+/// "zones": list
+/// }
+/// Zones list is a list of maps:
+/// { "origin": string,
+/// "file": string
+/// }
+/// \endcode
+/// (i.e. the configuration that was used prior to the datasource refactor)
+///
+/// This configuration setup is currently under discussion and will change in
+/// the near future.
+extern "C" DataSourceClient* createInstance(isc::data::ConstElementPtr config);
+
+/// \brief Destroy the instance created by createInstance()
+extern "C" void destroyInstance(DataSourceClient* instance);
+
+
}
}
#endif // __DATA_SOURCE_MEMORY_H
diff --git a/src/lib/datasrc/sqlite3_accessor.cc b/src/lib/datasrc/sqlite3_accessor.cc
index 817d530..7e5734c 100644
--- a/src/lib/datasrc/sqlite3_accessor.cc
+++ b/src/lib/datasrc/sqlite3_accessor.cc
@@ -14,41 +14,133 @@
#include <sqlite3.h>
+#include <string>
+#include <vector>
+
+#include <boost/foreach.hpp>
+
#include <datasrc/sqlite3_accessor.h>
#include <datasrc/logger.h>
#include <datasrc/data_source.h>
+#include <datasrc/factory.h>
#include <util/filename.h>
+using namespace std;
+using namespace isc::data;
+
+#define SQLITE_SCHEMA_VERSION 1
+
+#define CONFIG_ITEM_DATABASE_FILE "database_file"
+
namespace isc {
namespace datasrc {
+// The following enum and char* array define the SQL statements commonly
+// used in this implementation. Corresponding prepared statements (of
+// type sqlite3_stmt*) are maintained in the statements_ array of the
+// SQLite3Parameters structure.
+
+enum StatementID {
+ ZONE = 0,
+ ANY = 1,
+ ANY_SUB = 2,
+ BEGIN = 3,
+ COMMIT = 4,
+ ROLLBACK = 5,
+ DEL_ZONE_RECORDS = 6,
+ ADD_RECORD = 7,
+ DEL_RECORD = 8,
+ ITERATE = 9,
+ FIND_PREVIOUS = 10,
+ NUM_STATEMENTS = 11
+};
+
+const char* const text_statements[NUM_STATEMENTS] = {
+ // note for ANY and ITERATE: the order of the SELECT values is
+ // specifically chosen to match the enum values in RecordColumns
+ "SELECT id FROM zones WHERE name=?1 AND rdclass = ?2", // ZONE
+ "SELECT rdtype, ttl, sigtype, rdata FROM records " // ANY
+ "WHERE zone_id=?1 AND name=?2",
+ "SELECT rdtype, ttl, sigtype, rdata " // ANY_SUB
+ "FROM records WHERE zone_id=?1 AND name LIKE (\"%.\" || ?2)",
+ "BEGIN", // BEGIN
+ "COMMIT", // COMMIT
+ "ROLLBACK", // ROLLBACK
+ "DELETE FROM records WHERE zone_id=?1", // DEL_ZONE_RECORDS
+ "INSERT INTO records " // ADD_RECORD
+ "(zone_id, name, rname, ttl, rdtype, sigtype, rdata) "
+ "VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7)",
+ "DELETE FROM records WHERE zone_id=?1 AND name=?2 " // DEL_RECORD
+ "AND rdtype=?3 AND rdata=?4",
+ "SELECT rdtype, ttl, sigtype, rdata, name FROM records " // ITERATE
+ "WHERE zone_id = ?1 ORDER BY name, rdtype",
+ /*
+ * This one looks for previous name with NSEC record. It is done by
+ * using the reversed name. The NSEC is checked because we need to
+ * skip glue data, which don't have the NSEC.
+ */
+ "SELECT name FROM records " // FIND_PREVIOUS
+ "WHERE zone_id=?1 AND rdtype = 'NSEC' AND "
+ "rname < $2 ORDER BY rname DESC LIMIT 1"
+};
+
struct SQLite3Parameters {
SQLite3Parameters() :
- db_(NULL), version_(-1),
- q_zone_(NULL), q_any_(NULL)
- /*q_record_(NULL), q_addrs_(NULL), q_referral_(NULL),
- q_count_(NULL), q_previous_(NULL), q_nsec3_(NULL),
- q_prevnsec3_(NULL) */
- {}
+ db_(NULL), version_(-1), updating_zone(false), updated_zone_id(-1)
+ {
+ for (int i = 0; i < NUM_STATEMENTS; ++i) {
+ statements_[i] = NULL;
+ }
+ }
+
sqlite3* db_;
int version_;
- sqlite3_stmt* q_zone_;
- sqlite3_stmt* q_any_;
- /*
- TODO: Yet unneeded statements
- sqlite3_stmt* q_record_;
- sqlite3_stmt* q_addrs_;
- sqlite3_stmt* q_referral_;
- sqlite3_stmt* q_count_;
- sqlite3_stmt* q_previous_;
- sqlite3_stmt* q_nsec3_;
- sqlite3_stmt* q_prevnsec3_;
- */
+ sqlite3_stmt* statements_[NUM_STATEMENTS];
+ bool updating_zone; // whether or not updating the zone
+ int updated_zone_id; // valid only when updating_zone is true
+};
+
+// This is a helper class to encapsulate the code logic of executing
+// a specific SQLite3 statement, ensuring the corresponding prepared
+// statement is always reset whether the execution is completed successfully
+// or it results in an exception.
+// Note that an object of this class is intended to be used for "ephemeral"
+// statement, which is completed with a single "step" (normally within a
+// single call to an SQLite3Database method). In particular, it cannot be
+// used for "SELECT" variants, which generally expect multiple matching rows.
+class StatementProcessor {
+public:
+ // desc will be used on failure in the what() message of the resulting
+ // DataSourceError exception.
+ StatementProcessor(SQLite3Parameters& dbparameters, StatementID stmt_id,
+ const char* desc) :
+ dbparameters_(dbparameters), stmt_id_(stmt_id), desc_(desc)
+ {
+ sqlite3_clear_bindings(dbparameters_.statements_[stmt_id_]);
+ }
+
+ ~StatementProcessor() {
+ sqlite3_reset(dbparameters_.statements_[stmt_id_]);
+ }
+
+ void exec() {
+ if (sqlite3_step(dbparameters_.statements_[stmt_id_]) != SQLITE_DONE) {
+ sqlite3_reset(dbparameters_.statements_[stmt_id_]);
+ isc_throw(DataSourceError, "failed to " << desc_ << ": " <<
+ sqlite3_errmsg(dbparameters_.db_));
+ }
+ }
+
+private:
+ SQLite3Parameters& dbparameters_;
+ const StatementID stmt_id_;
+ const char* const desc_;
};
-SQLite3Database::SQLite3Database(const std::string& filename,
- const isc::dns::RRClass& rrclass) :
+SQLite3Accessor::SQLite3Accessor(const std::string& filename,
+ const isc::dns::RRClass& rrclass) :
dbparameters_(new SQLite3Parameters),
+ filename_(filename),
class_(rrclass.toText()),
database_name_("sqlite3_" +
isc::util::Filename(filename).nameAndExtension())
@@ -58,6 +150,25 @@ SQLite3Database::SQLite3Database(const std::string& filename,
open(filename);
}
+SQLite3Accessor::SQLite3Accessor(const std::string& filename,
+ const string& rrclass) :
+ dbparameters_(new SQLite3Parameters),
+ filename_(filename),
+ class_(rrclass),
+ database_name_("sqlite3_" +
+ isc::util::Filename(filename).nameAndExtension())
+{
+ LOG_DEBUG(logger, DBG_TRACE_BASIC, DATASRC_SQLITE_NEWCONN);
+
+ open(filename);
+}
+
+boost::shared_ptr<DatabaseAccessor>
+SQLite3Accessor::clone() {
+ return (boost::shared_ptr<DatabaseAccessor>(new SQLite3Accessor(filename_,
+ class_)));
+}
+
namespace {
// This is a helper class to initialize a Sqlite3 DB safely. An object of
@@ -70,35 +181,10 @@ namespace {
class Initializer {
public:
~Initializer() {
- if (params_.q_zone_ != NULL) {
- sqlite3_finalize(params_.q_zone_);
- }
- if (params_.q_any_ != NULL) {
- sqlite3_finalize(params_.q_any_);
+ for (int i = 0; i < NUM_STATEMENTS; ++i) {
+ sqlite3_finalize(params_.statements_[i]);
}
- /*
- if (params_.q_record_ != NULL) {
- sqlite3_finalize(params_.q_record_);
- }
- if (params_.q_addrs_ != NULL) {
- sqlite3_finalize(params_.q_addrs_);
- }
- if (params_.q_referral_ != NULL) {
- sqlite3_finalize(params_.q_referral_);
- }
- if (params_.q_count_ != NULL) {
- sqlite3_finalize(params_.q_count_);
- }
- if (params_.q_previous_ != NULL) {
- sqlite3_finalize(params_.q_previous_);
- }
- if (params_.q_nsec3_ != NULL) {
- sqlite3_finalize(params_.q_nsec3_);
- }
- if (params_.q_prevnsec3_ != NULL) {
- sqlite3_finalize(params_.q_prevnsec3_);
- }
- */
+
if (params_.db_ != NULL) {
sqlite3_close(params_.db_);
}
@@ -134,41 +220,6 @@ const char* const SCHEMA_LIST[] = {
NULL
};
-const char* const q_zone_str = "SELECT id FROM zones WHERE name=?1 AND rdclass = ?2";
-
-const char* const q_any_str = "SELECT rdtype, ttl, sigtype, rdata "
- "FROM records WHERE zone_id=?1 AND name=?2";
-
-/* TODO: Prune the statements, not everything will be needed maybe?
-const char* const q_record_str = "SELECT rdtype, ttl, sigtype, rdata "
- "FROM records WHERE zone_id=?1 AND name=?2 AND "
- "((rdtype=?3 OR sigtype=?3) OR "
- "(rdtype='CNAME' OR sigtype='CNAME') OR "
- "(rdtype='NS' OR sigtype='NS'))";
-
-const char* const q_addrs_str = "SELECT rdtype, ttl, sigtype, rdata "
- "FROM records WHERE zone_id=?1 AND name=?2 AND "
- "(rdtype='A' OR sigtype='A' OR rdtype='AAAA' OR sigtype='AAAA')";
-
-const char* const q_referral_str = "SELECT rdtype, ttl, sigtype, rdata FROM "
- "records WHERE zone_id=?1 AND name=?2 AND"
- "(rdtype='NS' OR sigtype='NS' OR rdtype='DS' OR sigtype='DS' OR "
- "rdtype='DNAME' OR sigtype='DNAME')";
-
-const char* const q_count_str = "SELECT COUNT(*) FROM records "
- "WHERE zone_id=?1 AND rname LIKE (?2 || '%');";
-
-const char* const q_previous_str = "SELECT name FROM records "
- "WHERE zone_id=?1 AND rdtype = 'NSEC' AND "
- "rname < $2 ORDER BY rname DESC LIMIT 1";
-
-const char* const q_nsec3_str = "SELECT rdtype, ttl, rdata FROM nsec3 "
- "WHERE zone_id = ?1 AND hash = $2";
-
-const char* const q_prevnsec3_str = "SELECT hash FROM nsec3 "
- "WHERE zone_id = ?1 AND hash <= $2 ORDER BY hash DESC LIMIT 1";
- */
-
sqlite3_stmt*
prepare(sqlite3* const db, const char* const statement) {
sqlite3_stmt* prepared = NULL;
@@ -179,47 +230,101 @@ prepare(sqlite3* const db, const char* const statement) {
return (prepared);
}
-void
-checkAndSetupSchema(Initializer* initializer) {
- sqlite3* const db = initializer->params_.db_;
+// small function to sleep for 0.1 seconds, needed when waiting for
+// exclusive database locks (which should only occur on startup, and only
+// when the database has not been created yet)
+void doSleep() {
+ struct timespec req;
+ req.tv_sec = 0;
+ req.tv_nsec = 100000000;
+ nanosleep(&req, NULL);
+}
+// returns the schema version if the schema version table exists
+// returns -1 if it does not
+int checkSchemaVersion(sqlite3* db) {
sqlite3_stmt* prepared = NULL;
- if (sqlite3_prepare_v2(db, "SELECT version FROM schema_version", -1,
- &prepared, NULL) == SQLITE_OK &&
- sqlite3_step(prepared) == SQLITE_ROW) {
- initializer->params_.version_ = sqlite3_column_int(prepared, 0);
- sqlite3_finalize(prepared);
- } else {
- logger.info(DATASRC_SQLITE_SETUP);
- if (prepared != NULL) {
- sqlite3_finalize(prepared);
+ // At this point in time, the database might be exclusively locked, in
+ // which case even prepare() will return BUSY, so we may need to try a
+ // few times
+ for (size_t i = 0; i < 50; ++i) {
+ int rc = sqlite3_prepare_v2(db, "SELECT version FROM schema_version",
+ -1, &prepared, NULL);
+ if (rc == SQLITE_ERROR) {
+ // this is the error that is returned when the table does not
+ // exist
+ return (-1);
+ } else if (rc == SQLITE_OK) {
+ break;
+ } else if (rc != SQLITE_BUSY || i == 50) {
+ isc_throw(SQLite3Error, "Unable to prepare version query: "
+ << rc << " " << sqlite3_errmsg(db));
}
+ doSleep();
+ }
+ if (sqlite3_step(prepared) != SQLITE_ROW) {
+ isc_throw(SQLite3Error,
+ "Unable to query version: " << sqlite3_errmsg(db));
+ }
+ int version = sqlite3_column_int(prepared, 0);
+ sqlite3_finalize(prepared);
+ return (version);
+}
+
+// return db version
+int create_database(sqlite3* db) {
+ // try to get an exclusive lock. Once that is obtained, do the version
+ // check *again*, just in case this process was racing another
+ //
+ // try for 5 secs (50*0.1)
+ int rc;
+ logger.info(DATASRC_SQLITE_SETUP);
+ for (size_t i = 0; i < 50; ++i) {
+ rc = sqlite3_exec(db, "BEGIN EXCLUSIVE TRANSACTION", NULL, NULL,
+ NULL);
+ if (rc == SQLITE_OK) {
+ break;
+ } else if (rc != SQLITE_BUSY || i == 50) {
+ isc_throw(SQLite3Error, "Unable to acquire exclusive lock "
+ "for database creation: " << sqlite3_errmsg(db));
+ }
+ doSleep();
+ }
+ int schema_version = checkSchemaVersion(db);
+ if (schema_version == -1) {
for (int i = 0; SCHEMA_LIST[i] != NULL; ++i) {
if (sqlite3_exec(db, SCHEMA_LIST[i], NULL, NULL, NULL) !=
SQLITE_OK) {
isc_throw(SQLite3Error,
- "Failed to set up schema " << SCHEMA_LIST[i]);
+ "Failed to set up schema " << SCHEMA_LIST[i]);
}
}
+ sqlite3_exec(db, "COMMIT TRANSACTION", NULL, NULL, NULL);
+ return (SQLITE_SCHEMA_VERSION);
+ } else {
+ return (schema_version);
}
+}
+
+void
+checkAndSetupSchema(Initializer* initializer) {
+ sqlite3* const db = initializer->params_.db_;
- initializer->params_.q_zone_ = prepare(db, q_zone_str);
- initializer->params_.q_any_ = prepare(db, q_any_str);
- /* TODO: Yet unneeded statements
- initializer->params_.q_record_ = prepare(db, q_record_str);
- initializer->params_.q_addrs_ = prepare(db, q_addrs_str);
- initializer->params_.q_referral_ = prepare(db, q_referral_str);
- initializer->params_.q_count_ = prepare(db, q_count_str);
- initializer->params_.q_previous_ = prepare(db, q_previous_str);
- initializer->params_.q_nsec3_ = prepare(db, q_nsec3_str);
- initializer->params_.q_prevnsec3_ = prepare(db, q_prevnsec3_str);
- */
+ int schema_version = checkSchemaVersion(db);
+ if (schema_version != SQLITE_SCHEMA_VERSION) {
+ schema_version = create_database(db);
+ }
+ initializer->params_.version_ = schema_version;
+
+ for (int i = 0; i < NUM_STATEMENTS; ++i) {
+ initializer->params_.statements_[i] = prepare(db, text_statements[i]);
+ }
}
}
void
-SQLite3Database::open(const std::string& name) {
+SQLite3Accessor::open(const std::string& name) {
LOG_DEBUG(logger, DBG_TRACE_BASIC, DATASRC_SQLITE_CONNOPEN).arg(name);
if (dbparameters_->db_ != NULL) {
// There shouldn't be a way to trigger this anyway
@@ -233,19 +338,18 @@ SQLite3Database::open(const std::string& name) {
}
checkAndSetupSchema(&initializer);
- initializer.move(dbparameters_);
+ initializer.move(dbparameters_.get());
}
-SQLite3Database::~SQLite3Database() {
+SQLite3Accessor::~SQLite3Accessor() {
LOG_DEBUG(logger, DBG_TRACE_BASIC, DATASRC_SQLITE_DROPCONN);
if (dbparameters_->db_ != NULL) {
close();
}
- delete dbparameters_;
}
void
-SQLite3Database::close(void) {
+SQLite3Accessor::close(void) {
LOG_DEBUG(logger, DBG_TRACE_BASIC, DATASRC_SQLITE_CONNCLOSE);
if (dbparameters_->db_ == NULL) {
isc_throw(DataSourceError,
@@ -253,109 +357,63 @@ SQLite3Database::close(void) {
}
// XXX: sqlite3_finalize() could fail. What should we do in that case?
- sqlite3_finalize(dbparameters_->q_zone_);
- dbparameters_->q_zone_ = NULL;
-
- sqlite3_finalize(dbparameters_->q_any_);
- dbparameters_->q_any_ = NULL;
-
- /* TODO: Once they are needed or not, uncomment or drop
- sqlite3_finalize(dbparameters->q_record_);
- dbparameters->q_record_ = NULL;
-
- sqlite3_finalize(dbparameters->q_addrs_);
- dbparameters->q_addrs_ = NULL;
-
- sqlite3_finalize(dbparameters->q_referral_);
- dbparameters->q_referral_ = NULL;
-
- sqlite3_finalize(dbparameters->q_count_);
- dbparameters->q_count_ = NULL;
-
- sqlite3_finalize(dbparameters->q_previous_);
- dbparameters->q_previous_ = NULL;
-
- sqlite3_finalize(dbparameters->q_prevnsec3_);
- dbparameters->q_prevnsec3_ = NULL;
-
- sqlite3_finalize(dbparameters->q_nsec3_);
- dbparameters->q_nsec3_ = NULL;
- */
+ for (int i = 0; i < NUM_STATEMENTS; ++i) {
+ sqlite3_finalize(dbparameters_->statements_[i]);
+ dbparameters_->statements_[i] = NULL;
+ }
sqlite3_close(dbparameters_->db_);
dbparameters_->db_ = NULL;
}
std::pair<bool, int>
-SQLite3Database::getZone(const isc::dns::Name& name) const {
+SQLite3Accessor::getZone(const std::string& name) const {
int rc;
+ sqlite3_stmt* const stmt = dbparameters_->statements_[ZONE];
// Take the statement (simple SELECT id FROM zones WHERE...)
// and prepare it (bind the parameters to it)
- sqlite3_reset(dbparameters_->q_zone_);
- rc = sqlite3_bind_text(dbparameters_->q_zone_, 1, name.toText().c_str(),
- -1, SQLITE_TRANSIENT);
+ sqlite3_reset(stmt);
+ rc = sqlite3_bind_text(stmt, 1, name.c_str(), -1, SQLITE_STATIC);
if (rc != SQLITE_OK) {
isc_throw(SQLite3Error, "Could not bind " << name <<
" to SQL statement (zone)");
}
- rc = sqlite3_bind_text(dbparameters_->q_zone_, 2, class_.c_str(), -1,
- SQLITE_STATIC);
+ rc = sqlite3_bind_text(stmt, 2, class_.c_str(), -1, SQLITE_STATIC);
if (rc != SQLITE_OK) {
isc_throw(SQLite3Error, "Could not bind " << class_ <<
" to SQL statement (zone)");
}
// Get the data there and see if it found anything
- rc = sqlite3_step(dbparameters_->q_zone_);
- std::pair<bool, int> result;
+ rc = sqlite3_step(stmt);
if (rc == SQLITE_ROW) {
- result = std::pair<bool, int>(true,
- sqlite3_column_int(dbparameters_->
- q_zone_, 0));
- } else {
- result = std::pair<bool, int>(false, 0);
+ const int zone_id = sqlite3_column_int(stmt, 0);
+ sqlite3_reset(stmt);
+ return (pair<bool, int>(true, zone_id));
+ } else if (rc == SQLITE_DONE) {
+ // Free resources
+ sqlite3_reset(stmt);
+ return (pair<bool, int>(false, 0));
}
- // Free resources
- sqlite3_reset(dbparameters_->q_zone_);
-
- return (result);
-}
-void
-SQLite3Database::searchForRecords(int zone_id, const std::string& name) {
- resetSearch();
- if (sqlite3_bind_int(dbparameters_->q_any_, 1, zone_id) != SQLITE_OK) {
- isc_throw(DataSourceError,
- "Error in sqlite3_bind_int() for zone_id " <<
- zone_id << ": " << sqlite3_errmsg(dbparameters_->db_));
- }
- // use transient since name is a ref and may disappear
- if (sqlite3_bind_text(dbparameters_->q_any_, 2, name.c_str(), -1,
- SQLITE_TRANSIENT) != SQLITE_OK) {
- isc_throw(DataSourceError,
- "Error in sqlite3_bind_text() for name " <<
- name << ": " << sqlite3_errmsg(dbparameters_->db_));
- }
+ sqlite3_reset(stmt);
+ isc_throw(DataSourceError, "Unexpected failure in sqlite3_step: " <<
+ sqlite3_errmsg(dbparameters_->db_));
+ // Compilers might not realize isc_throw always throws
+ return (std::pair<bool, int>(false, 0));
}
namespace {
-// This helper function converts from the unsigned char* type (used by
-// sqlite3) to char* (wanted by std::string). Technically these types
-// might not be directly convertable
-// In case sqlite3_column_text() returns NULL, we just make it an
-// empty string.
-// The sqlite3parameters value is only used to check the error code if
-// ucp == NULL
+
+// Conversion to plain char
const char*
-convertToPlainChar(const unsigned char* ucp,
- SQLite3Parameters* dbparameters) {
+convertToPlainChar(const unsigned char* ucp, sqlite3 *db) {
if (ucp == NULL) {
// The field can really be NULL, in which case we return an
// empty string, or sqlite may have run out of memory, in
// which case we raise an error
- if (dbparameters != NULL &&
- sqlite3_errcode(dbparameters->db_) == SQLITE_NOMEM) {
+ if (sqlite3_errcode(db) == SQLITE_NOMEM) {
isc_throw(DataSourceError,
"Sqlite3 backend encountered a memory allocation "
"error in sqlite3_column_text()");
@@ -366,47 +424,360 @@ convertToPlainChar(const unsigned char* ucp,
const void* p = ucp;
return (static_cast<const char*>(p));
}
+
}
+class SQLite3Accessor::Context : public DatabaseAccessor::IteratorContext {
+public:
+ // Construct an iterator for all records. When constructed this
+ // way, the getNext() call will copy all fields
+ Context(const boost::shared_ptr<const SQLite3Accessor>& accessor, int id) :
+ iterator_type_(ITT_ALL),
+ accessor_(accessor),
+ statement_(NULL),
+ name_("")
+ {
+ // We create the statement now and then just keep getting data from it
+ statement_ = prepare(accessor->dbparameters_->db_,
+ text_statements[ITERATE]);
+ bindZoneId(id);
+ }
-bool
-SQLite3Database::getNextRecord(std::string columns[], size_t column_count) {
- if (column_count != COLUMN_COUNT) {
+ // Construct an iterator for records with a specific name. When constructed
+ // this way, the getNext() call will copy all fields except name
+ Context(const boost::shared_ptr<const SQLite3Accessor>& accessor, int id,
+ const std::string& name, bool subdomains) :
+ iterator_type_(ITT_NAME),
+ accessor_(accessor),
+ statement_(NULL),
+ name_(name)
+
+ {
+ // We create the statement now and then just keep getting data from it
+ statement_ = prepare(accessor->dbparameters_->db_,
+ subdomains ? text_statements[ANY_SUB] :
+ text_statements[ANY]);
+ bindZoneId(id);
+ bindName(name_);
+ }
+
+ bool getNext(std::string (&data)[COLUMN_COUNT]) {
+ // If there's another row, get it
+ // If finalize has been called (e.g. when previous getNext() got
+ // SQLITE_DONE), directly return false
+ if (statement_ == NULL) {
+ return false;
+ }
+ const int rc(sqlite3_step(statement_));
+ if (rc == SQLITE_ROW) {
+ // For both types, we copy the first four columns
+ copyColumn(data, TYPE_COLUMN);
+ copyColumn(data, TTL_COLUMN);
+ copyColumn(data, SIGTYPE_COLUMN);
+ copyColumn(data, RDATA_COLUMN);
+ // Only copy Name if we are iterating over every record
+ if (iterator_type_ == ITT_ALL) {
+ copyColumn(data, NAME_COLUMN);
+ }
+ return (true);
+ } else if (rc != SQLITE_DONE) {
isc_throw(DataSourceError,
- "Datasource backend caller did not pass a column array "
- "of size " << COLUMN_COUNT << " to getNextRecord()");
+ "Unexpected failure in sqlite3_step: " <<
+ sqlite3_errmsg(accessor_->dbparameters_->db_));
+ }
+ finalize();
+ return (false);
}
- sqlite3_stmt* current_stmt = dbparameters_->q_any_;
- const int rc = sqlite3_step(current_stmt);
+ virtual ~Context() {
+ finalize();
+ }
- if (rc == SQLITE_ROW) {
- for (int column = 0; column < column_count; ++column) {
- try {
- columns[column] = convertToPlainChar(sqlite3_column_text(
- current_stmt, column),
- dbparameters_);
- } catch (const std::bad_alloc&) {
+private:
+ // Depending on which constructor is called, behaviour is slightly
+ // different. We keep track of what to do with the iterator type
+ // See description of getNext() and the constructors
+ enum IteratorType {
+ ITT_ALL,
+ ITT_NAME
+ };
+
+ void copyColumn(std::string (&data)[COLUMN_COUNT], int column) {
+ data[column] = convertToPlainChar(sqlite3_column_text(statement_,
+ column),
+ accessor_->dbparameters_->db_);
+ }
+
+ void bindZoneId(const int zone_id) {
+ if (sqlite3_bind_int(statement_, 1, zone_id) != SQLITE_OK) {
+ finalize();
+ isc_throw(SQLite3Error, "Could not bind int " << zone_id <<
+ " to SQL statement: " <<
+ sqlite3_errmsg(accessor_->dbparameters_->db_));
+ }
+ }
+
+ void bindName(const std::string& name) {
+ if (sqlite3_bind_text(statement_, 2, name.c_str(), -1,
+ SQLITE_TRANSIENT) != SQLITE_OK) {
+ const char* errmsg = sqlite3_errmsg(accessor_->dbparameters_->db_);
+ finalize();
+ isc_throw(SQLite3Error, "Could not bind text '" << name <<
+ "' to SQL statement: " << errmsg);
+ }
+ }
+
+ void finalize() {
+ sqlite3_finalize(statement_);
+ statement_ = NULL;
+ }
+
+ const IteratorType iterator_type_;
+ boost::shared_ptr<const SQLite3Accessor> accessor_;
+ sqlite3_stmt *statement_;
+ const std::string name_;
+};
+
+DatabaseAccessor::IteratorContextPtr
+SQLite3Accessor::getRecords(const std::string& name, int id,
+ bool subdomains) const
+{
+ return (IteratorContextPtr(new Context(shared_from_this(), id, name,
+ subdomains)));
+}
+
+DatabaseAccessor::IteratorContextPtr
+SQLite3Accessor::getAllRecords(int id) const {
+ return (IteratorContextPtr(new Context(shared_from_this(), id)));
+}
+
+pair<bool, int>
+SQLite3Accessor::startUpdateZone(const string& zone_name, const bool replace) {
+ if (dbparameters_->updating_zone) {
+ isc_throw(DataSourceError,
+ "duplicate zone update on SQLite3 data source");
+ }
+
+ const pair<bool, int> zone_info(getZone(zone_name));
+ if (!zone_info.first) {
+ return (zone_info);
+ }
+
+ StatementProcessor(*dbparameters_, BEGIN,
+ "start an SQLite3 transaction").exec();
+
+ if (replace) {
+ try {
+ StatementProcessor delzone_exec(*dbparameters_, DEL_ZONE_RECORDS,
+ "delete zone records");
+
+ sqlite3_clear_bindings(
+ dbparameters_->statements_[DEL_ZONE_RECORDS]);
+ if (sqlite3_bind_int(dbparameters_->statements_[DEL_ZONE_RECORDS],
+ 1, zone_info.second) != SQLITE_OK) {
isc_throw(DataSourceError,
- "bad_alloc in Sqlite3Connection::getNextRecord");
+ "failed to bind SQLite3 parameter: " <<
+ sqlite3_errmsg(dbparameters_->db_));
}
+
+ delzone_exec.exec();
+ } catch (const DataSourceError&) {
+ // Once we start a transaction, if something unexpected happens
+ // we need to rollback the transaction so that a subsequent update
+ // is still possible with this accessor.
+ StatementProcessor(*dbparameters_, ROLLBACK,
+ "rollback an SQLite3 transaction").exec();
+ throw;
}
- return (true);
- } else if (rc == SQLITE_DONE) {
- // reached the end of matching rows
- resetSearch();
- return (false);
}
- isc_throw(DataSourceError, "Unexpected failure in sqlite3_step: " <<
- sqlite3_errmsg(dbparameters_->db_));
- // Compilers might not realize isc_throw always throws
- return (false);
+
+ dbparameters_->updating_zone = true;
+ dbparameters_->updated_zone_id = zone_info.second;
+
+ return (zone_info);
+}
+
+void
+SQLite3Accessor::commitUpdateZone() {
+ if (!dbparameters_->updating_zone) {
+ isc_throw(DataSourceError, "committing zone update on SQLite3 "
+ "data source without transaction");
+ }
+
+ StatementProcessor(*dbparameters_, COMMIT,
+ "commit an SQLite3 transaction").exec();
+ dbparameters_->updating_zone = false;
+ dbparameters_->updated_zone_id = -1;
+}
+
+void
+SQLite3Accessor::rollbackUpdateZone() {
+ if (!dbparameters_->updating_zone) {
+ isc_throw(DataSourceError, "rolling back zone update on SQLite3 "
+ "data source without transaction");
+ }
+
+ StatementProcessor(*dbparameters_, ROLLBACK,
+ "rollback an SQLite3 transaction").exec();
+ dbparameters_->updating_zone = false;
+ dbparameters_->updated_zone_id = -1;
}
+namespace {
+// Commonly used code sequence for adding/deleting record
+template <typename COLUMNS_TYPE>
void
-SQLite3Database::resetSearch() {
- sqlite3_reset(dbparameters_->q_any_);
- sqlite3_clear_bindings(dbparameters_->q_any_);
+doUpdate(SQLite3Parameters& dbparams, StatementID stmt_id,
+ COLUMNS_TYPE update_params, const char* exec_desc)
+{
+ sqlite3_stmt* const stmt = dbparams.statements_[stmt_id];
+ StatementProcessor executer(dbparams, stmt_id, exec_desc);
+
+ int param_id = 0;
+ if (sqlite3_bind_int(stmt, ++param_id, dbparams.updated_zone_id)
+ != SQLITE_OK) {
+ isc_throw(DataSourceError, "failed to bind SQLite3 parameter: " <<
+ sqlite3_errmsg(dbparams.db_));
+ }
+ const size_t column_count =
+ sizeof(update_params) / sizeof(update_params[0]);
+ for (int i = 0; i < column_count; ++i) {
+ // The old sqlite3 data source API assumes NULL for an empty column.
+ // We need to provide compatibility at least for now.
+ if (sqlite3_bind_text(stmt, ++param_id,
+ update_params[i].empty() ? NULL :
+ update_params[i].c_str(),
+ -1, SQLITE_TRANSIENT) != SQLITE_OK) {
+ isc_throw(DataSourceError, "failed to bind SQLite3 parameter: " <<
+ sqlite3_errmsg(dbparams.db_));
+ }
+ }
+ executer.exec();
+}
}
+void
+SQLite3Accessor::addRecordToZone(const string (&columns)[ADD_COLUMN_COUNT]) {
+ if (!dbparameters_->updating_zone) {
+ isc_throw(DataSourceError, "adding record to SQLite3 "
+ "data source without transaction");
+ }
+ doUpdate<const string (&)[DatabaseAccessor::ADD_COLUMN_COUNT]>(
+ *dbparameters_, ADD_RECORD, columns, "add record to zone");
}
+
+void
+SQLite3Accessor::deleteRecordInZone(const string (¶ms)[DEL_PARAM_COUNT]) {
+ if (!dbparameters_->updating_zone) {
+ isc_throw(DataSourceError, "deleting record in SQLite3 "
+ "data source without transaction");
+ }
+ doUpdate<const string (&)[DatabaseAccessor::DEL_PARAM_COUNT]>(
+ *dbparameters_, DEL_RECORD, params, "delete record from zone");
}
+
+std::string
+SQLite3Accessor::findPreviousName(int zone_id, const std::string& rname)
+ const
+{
+ sqlite3_reset(dbparameters_->statements_[FIND_PREVIOUS]);
+ sqlite3_clear_bindings(dbparameters_->statements_[FIND_PREVIOUS]);
+
+ if (sqlite3_bind_int(dbparameters_->statements_[FIND_PREVIOUS], 1,
+ zone_id) != SQLITE_OK) {
+ isc_throw(SQLite3Error, "Could not bind zone ID " << zone_id <<
+ " to SQL statement (find previous): " <<
+ sqlite3_errmsg(dbparameters_->db_));
+ }
+ if (sqlite3_bind_text(dbparameters_->statements_[FIND_PREVIOUS], 2,
+ rname.c_str(), -1, SQLITE_STATIC) != SQLITE_OK) {
+ isc_throw(SQLite3Error, "Could not bind name " << rname <<
+ " to SQL statement (find previous): " <<
+ sqlite3_errmsg(dbparameters_->db_));
+ }
+
+ std::string result;
+ const int rc = sqlite3_step(dbparameters_->statements_[FIND_PREVIOUS]);
+ if (rc == SQLITE_ROW) {
+ // We found it
+ result = convertToPlainChar(sqlite3_column_text(dbparameters_->
+ statements_[FIND_PREVIOUS], 0), dbparameters_->db_);
+ }
+ sqlite3_reset(dbparameters_->statements_[FIND_PREVIOUS]);
+
+ if (rc == SQLITE_DONE) {
+ // No NSEC records here, this DB doesn't support DNSSEC or
+ // we asked before the apex
+ isc_throw(isc::NotImplemented, "The zone doesn't support DNSSEC or "
+ "query before apex");
+ }
+
+ if (rc != SQLITE_ROW && rc != SQLITE_DONE) {
+ // Some kind of error
+ isc_throw(SQLite3Error, "Could not get data for previous name");
+ }
+
+ return (result);
+}
+
+namespace {
+void
+addError(ElementPtr errors, const std::string& error) {
+ if (errors != ElementPtr() && errors->getType() == Element::list) {
+ errors->add(Element::create(error));
+ }
+}
+
+bool
+checkConfig(ConstElementPtr config, ElementPtr errors) {
+ /* Specific configuration is under discussion, right now this accepts
+ * the 'old' configuration, see header file
+ */
+ bool result = true;
+
+ if (!config || config->getType() != Element::map) {
+ addError(errors, "Base config for SQlite3 backend must be a map");
+ result = false;
+ } else {
+ if (!config->contains(CONFIG_ITEM_DATABASE_FILE)) {
+ addError(errors,
+ "Config for SQlite3 backend does not contain a '"
+ CONFIG_ITEM_DATABASE_FILE
+ "' value");
+ result = false;
+ } else if (!config->get(CONFIG_ITEM_DATABASE_FILE) ||
+ config->get(CONFIG_ITEM_DATABASE_FILE)->getType() !=
+ Element::string) {
+ addError(errors, "value of " CONFIG_ITEM_DATABASE_FILE
+ " in SQLite3 backend is not a string");
+ result = false;
+ } else if (config->get(CONFIG_ITEM_DATABASE_FILE)->stringValue() ==
+ "") {
+ addError(errors, "value of " CONFIG_ITEM_DATABASE_FILE
+ " in SQLite3 backend is empty");
+ result = false;
+ }
+ }
+
+ return (result);
+}
+
+} // end anonymous namespace
+
+DataSourceClient *
+createInstance(isc::data::ConstElementPtr config) {
+ ElementPtr errors(Element::createList());
+ if (!checkConfig(config, errors)) {
+ isc_throw(DataSourceConfigError, errors->str());
+ }
+ std::string dbfile = config->get(CONFIG_ITEM_DATABASE_FILE)->stringValue();
+ boost::shared_ptr<DatabaseAccessor> sqlite3_accessor(
+ new SQLite3Accessor(dbfile, isc::dns::RRClass::IN()));
+ return (new DatabaseClient(isc::dns::RRClass::IN(), sqlite3_accessor));
+}
+
+void destroyInstance(DataSourceClient* instance) {
+ delete instance;
+}
+
+} // end of namespace datasrc
+} // end of namespace isc
diff --git a/src/lib/datasrc/sqlite3_accessor.h b/src/lib/datasrc/sqlite3_accessor.h
index 4c2ec8b..3286f3b 100644
--- a/src/lib/datasrc/sqlite3_accessor.h
+++ b/src/lib/datasrc/sqlite3_accessor.h
@@ -20,8 +20,12 @@
#include <exceptions/exceptions.h>
+#include <boost/enable_shared_from_this.hpp>
+#include <boost/scoped_ptr.hpp>
#include <string>
+#include <cc/data.h>
+
namespace isc {
namespace dns {
class RRClass;
@@ -51,7 +55,8 @@ struct SQLite3Parameters;
* According to the design, it doesn't interpret the data in any way, it just
* provides unified access to the DB.
*/
-class SQLite3Database : public DatabaseAccessor {
+class SQLite3Accessor : public DatabaseAccessor,
+ public boost::enable_shared_from_this<SQLite3Accessor> {
public:
/**
* \brief Constructor
@@ -66,14 +71,30 @@ public:
* file can contain multiple classes of data, single database can
* provide only one class).
*/
- SQLite3Database(const std::string& filename,
+ SQLite3Accessor(const std::string& filename,
const isc::dns::RRClass& rrclass);
+
+ /**
+ * \brief Constructor
+ *
+ * Same as the other version, but takes rrclass as a bare string.
+ * We should obsolete the other version and unify the constructor to
+ * this version; the SQLite3Accessor is expected to be "dumb" and
+ * shouldn't care about DNS specific information such as RRClass.
+ */
+ SQLite3Accessor(const std::string& filename, const std::string& rrclass);
+
/**
* \brief Destructor
*
* Closes the database.
*/
- ~SQLite3Database();
+ ~SQLite3Accessor();
+
+ /// This implementation internally opens a new sqlite3 database for the
+ /// same file name specified in the constructor of the original accessor.
+ virtual boost::shared_ptr<DatabaseAccessor> clone();
+
/**
* \brief Look up a zone
*
@@ -83,57 +104,66 @@ public:
*
* \exception SQLite3Error if something about the database is broken.
*
- * \param name The name of zone to look up
+ * \param name The (fully qualified) domain name of zone to look up
* \return The pair contains if the lookup was successful in the first
* element and the zone id in the second if it was.
*/
- virtual std::pair<bool, int> getZone(const isc::dns::Name& name) const;
+ virtual std::pair<bool, int> getZone(const std::string& name) const;
- /**
- * \brief Start a new search for the given name in the given zone.
+ /** \brief Look up all resource records for a name
*
- * This implements the searchForRecords from DatabaseConnection.
- * This particular implementation does not raise DataSourceError.
+ * This implements the getRecords() method from DatabaseAccessor
*
- * \exception DataSourceError when sqlite3_bind_int() or
- * sqlite3_bind_text() fails
+ * \exception SQLite3Error if there is an sqlite3 error when performing
+ * the query
*
- * \param zone_id The zone to seach in, as returned by getZone()
- * \param name The name to find records for
+ * \param name the name to look up
+ * \param id the zone id, as returned by getZone()
+ * \param subdomains Match subdomains instead of the name.
+ * \return Iterator that contains all records with the given name
*/
- virtual void searchForRecords(int zone_id, const std::string& name);
+ virtual IteratorContextPtr getRecords(const std::string& name,
+ int id,
+ bool subdomains = false) const;
- /**
- * \brief Retrieve the next record from the search started with
- * searchForRecords
- *
- * This implements the getNextRecord from DatabaseConnection.
- * See the documentation there for more information.
- *
- * If this method raises an exception, the contents of columns are undefined.
+ /** \brief Look up all resource records for a zone
*
- * \exception DataSourceError if there is an error returned by sqlite_step()
- * When this exception is raised, the current
- * search as initialized by searchForRecords() is
- * NOT reset, and the caller is expected to take
- * care of that.
- * \param columns This vector will be cleared, and the fields of the record will
- * be appended here as strings (in the order rdtype, ttl, sigtype,
- * and rdata). If there was no data (i.e. if this call returns
- * false), the vector is untouched.
- * \return true if there was a next record, false if there was not
- */
- virtual bool getNextRecord(std::string columns[], size_t column_count);
-
- /**
- * \brief Resets any state created by searchForRecords
+ * This implements the getAllRecords() method from DatabaseAccessor
*
- * This implements the resetSearch from DatabaseConnection.
- * See the documentation there for more information.
+ * \exception SQLite3Error if there is an sqlite3 error when performing
+ * the query
*
- * This function never throws.
+ * \param id the zone id, as returned by getZone()
+ * \return Iterator that contains all records in the given zone
*/
- virtual void resetSearch();
+ virtual IteratorContextPtr getAllRecords(int id) const;
+
+ virtual std::pair<bool, int> startUpdateZone(const std::string& zone_name,
+ bool replace);
+
+ /// \note we are quite impatient here: it's quite possible that the COMMIT
+ /// fails due to other process performing SELECT on the same database
+ /// (consider the case where COMMIT is done by xfrin or dynamic update
+ /// server while an authoritative server is busy reading the DB).
+ /// In a future version we should probably need to introduce some retry
+ /// attempt and/or increase timeout before giving up the COMMIT, even
+ /// if it still doesn't guarantee 100% success. Right now this
+ /// implementation throws a \c DataSourceError exception in such a case.
+ virtual void commitUpdateZone();
+
+ /// \note In SQLite3, a rollback can fail if another unfinished
+ /// statement is being performed for the same database structure.
+ /// Although it's not expected to happen in our expected usage, it's not
+ /// guaranteed to be prevented at the API level. If it ever happens, this
+ /// method throws a \c DataSourceError exception. It should be
+ /// considered a bug of the higher level application program.
+ virtual void rollbackUpdateZone();
+
+ virtual void addRecordToZone(
+ const std::string (&columns)[ADD_COLUMN_COUNT]);
+
+ virtual void deleteRecordInZone(
+ const std::string (¶ms)[DEL_PARAM_COUNT]);
/// The SQLite3 implementation of this method returns a string starting
/// with a fixed prefix of "sqlite3_" followed by the DB file name
@@ -142,19 +172,44 @@ public:
/// "sqlite3_bind10.sqlite3".
virtual const std::string& getDBName() const { return (database_name_); }
+ /// \brief Concrete implementation of the pure virtual method
+ virtual std::string findPreviousName(int zone_id, const std::string& rname)
+ const;
+
private:
/// \brief Private database data
- SQLite3Parameters* dbparameters_;
+ boost::scoped_ptr<SQLite3Parameters> dbparameters_;
+ /// \brief The filename of the DB (necessary for clone())
+ const std::string filename_;
/// \brief The class for which the queries are done
const std::string class_;
/// \brief Opens the database
void open(const std::string& filename);
/// \brief Closes the database
void close();
+ /// \brief SQLite3 implementation of IteratorContext
+ class Context;
+ friend class Context;
const std::string database_name_;
};
+/// \brief Creates an instance of the SQLite3 datasource client
+///
+/// Currently the configuration passed here must be a MapElement, containing
+/// one item called "database_file", whose value is a string
+///
+/// This configuration setup is currently under discussion and will change in
+/// the near future.
+extern "C" DataSourceClient* createInstance(isc::data::ConstElementPtr config);
+
+/// \brief Destroy the instance created by createInstance()
+extern "C" void destroyInstance(DataSourceClient* instance);
+
}
}
-#endif
+#endif // __DATASRC_SQLITE3_CONNECTION_H
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/datasrc/sqlite3_datasrc.cc b/src/lib/datasrc/sqlite3_datasrc.cc
index 18ee929..03b057c 100644
--- a/src/lib/datasrc/sqlite3_datasrc.cc
+++ b/src/lib/datasrc/sqlite3_datasrc.cc
@@ -26,6 +26,8 @@
#include <dns/rrset.h>
#include <dns/rrsetlist.h>
+#define SQLITE_SCHEMA_VERSION 1
+
using namespace std;
using namespace isc::dns;
using namespace isc::dns::rdata;
@@ -77,6 +79,8 @@ const char* const SCHEMA_LIST[] = {
NULL
};
+const char* const q_version_str = "SELECT version FROM schema_version";
+
const char* const q_zone_str = "SELECT id FROM zones WHERE name=?1";
const char* const q_record_str = "SELECT rdtype, ttl, sigtype, rdata "
@@ -254,7 +258,7 @@ Sqlite3DataSrc::findRecords(const Name& name, const RRType& rdtype,
}
break;
}
-
+
sqlite3_reset(query);
sqlite3_clear_bindings(query);
@@ -295,7 +299,7 @@ Sqlite3DataSrc::findRecords(const Name& name, const RRType& rdtype,
//
sqlite3_reset(dbparameters->q_count_);
sqlite3_clear_bindings(dbparameters->q_count_);
-
+
rc = sqlite3_bind_int(dbparameters->q_count_, 1, zone_id);
if (rc != SQLITE_OK) {
isc_throw(Sqlite3Error, "Could not bind zone ID " << zone_id <<
@@ -653,29 +657,90 @@ prepare(sqlite3* const db, const char* const statement) {
return (prepared);
}
-void
-checkAndSetupSchema(Sqlite3Initializer* initializer) {
- sqlite3* const db = initializer->params_.db_;
+// small function to sleep for 0.1 seconds, needed when waiting for
+// exclusive database locks (which should only occur on startup, and only
+// when the database has not been created yet)
+void do_sleep() {
+ struct timespec req;
+ req.tv_sec = 0;
+ req.tv_nsec = 100000000;
+ nanosleep(&req, NULL);
+}
+// returns the schema version if the schema version table exists
+// returns -1 if it does not
+int check_schema_version(sqlite3* db) {
sqlite3_stmt* prepared = NULL;
- if (sqlite3_prepare_v2(db, "SELECT version FROM schema_version", -1,
- &prepared, NULL) == SQLITE_OK &&
- sqlite3_step(prepared) == SQLITE_ROW) {
- initializer->params_.version_ = sqlite3_column_int(prepared, 0);
- sqlite3_finalize(prepared);
- } else {
- logger.info(DATASRC_SQLITE_SETUP);
- if (prepared != NULL) {
- sqlite3_finalize(prepared);
+ // At this point in time, the database might be exclusively locked, in
+ // which case even prepare() will return BUSY, so we may need to try a
+ // few times
+ for (size_t i = 0; i < 50; ++i) {
+ int rc = sqlite3_prepare_v2(db, q_version_str, -1, &prepared, NULL);
+ if (rc == SQLITE_ERROR) {
+ // this is the error that is returned when the table does not
+ // exist
+ return (-1);
+ } else if (rc == SQLITE_OK) {
+ break;
+ } else if (rc != SQLITE_BUSY || i == 50) {
+ isc_throw(Sqlite3Error, "Unable to prepare version query: "
+ << rc << " " << sqlite3_errmsg(db));
}
+ do_sleep();
+ }
+ if (sqlite3_step(prepared) != SQLITE_ROW) {
+ isc_throw(Sqlite3Error,
+ "Unable to query version: " << sqlite3_errmsg(db));
+ }
+ int version = sqlite3_column_int(prepared, 0);
+ sqlite3_finalize(prepared);
+ return (version);
+}
+
+// return db version
+int create_database(sqlite3* db) {
+ // try to get an exclusive lock. Once that is obtained, do the version
+ // check *again*, just in case this process was racing another
+ //
+ // try for 5 secs (50*0.1)
+ int rc;
+ logger.info(DATASRC_SQLITE_SETUP);
+ for (size_t i = 0; i < 50; ++i) {
+ rc = sqlite3_exec(db, "BEGIN EXCLUSIVE TRANSACTION", NULL, NULL,
+ NULL);
+ if (rc == SQLITE_OK) {
+ break;
+ } else if (rc != SQLITE_BUSY || i == 50) {
+ isc_throw(Sqlite3Error, "Unable to acquire exclusive lock "
+ "for database creation: " << sqlite3_errmsg(db));
+ }
+ do_sleep();
+ }
+ int schema_version = check_schema_version(db);
+ if (schema_version == -1) {
for (int i = 0; SCHEMA_LIST[i] != NULL; ++i) {
if (sqlite3_exec(db, SCHEMA_LIST[i], NULL, NULL, NULL) !=
SQLITE_OK) {
isc_throw(Sqlite3Error,
- "Failed to set up schema " << SCHEMA_LIST[i]);
+ "Failed to set up schema " << SCHEMA_LIST[i]);
}
}
+ sqlite3_exec(db, "COMMIT TRANSACTION", NULL, NULL, NULL);
+ return (SQLITE_SCHEMA_VERSION);
+ } else {
+ return (schema_version);
+ }
+}
+
+void
+checkAndSetupSchema(Sqlite3Initializer* initializer) {
+ sqlite3* const db = initializer->params_.db_;
+
+ int schema_version = check_schema_version(db);
+ if (schema_version != SQLITE_SCHEMA_VERSION) {
+ schema_version = create_database(db);
}
+ initializer->params_.version_ = schema_version;
initializer->params_.q_zone_ = prepare(db, q_zone_str);
initializer->params_.q_record_ = prepare(db, q_record_str);
diff --git a/src/lib/datasrc/static_datasrc.cc b/src/lib/datasrc/static_datasrc.cc
index 65229a0..fd43e1c 100644
--- a/src/lib/datasrc/static_datasrc.cc
+++ b/src/lib/datasrc/static_datasrc.cc
@@ -70,6 +70,7 @@ StaticDataSrcImpl::StaticDataSrcImpl() :
authors = RRsetPtr(new RRset(authors_name, RRClass::CH(),
RRType::TXT(), RRTTL(0)));
authors->addRdata(generic::TXT("Chen Zhengzhang")); // Jerry
+ authors->addRdata(generic::TXT("Dmitriy Volodin"));
authors->addRdata(generic::TXT("Evan Hunt"));
authors->addRdata(generic::TXT("Haidong Wang")); // Ocean
authors->addRdata(generic::TXT("Han Feng"));
diff --git a/src/lib/datasrc/tests/Makefile.am b/src/lib/datasrc/tests/Makefile.am
index 1a65f82..3183b1d 100644
--- a/src/lib/datasrc/tests/Makefile.am
+++ b/src/lib/datasrc/tests/Makefile.am
@@ -1,8 +1,12 @@
+SUBDIRS = . testdata
+
AM_CPPFLAGS = -I$(top_srcdir)/src/lib -I$(top_builddir)/src/lib
AM_CPPFLAGS += -I$(top_builddir)/src/lib/dns -I$(top_srcdir)/src/lib/dns
AM_CPPFLAGS += $(BOOST_INCLUDES)
AM_CPPFLAGS += $(SQLITE_CFLAGS)
-AM_CPPFLAGS += -DTEST_DATA_DIR=\"$(srcdir)/testdata\"
+AM_CPPFLAGS += -DTEST_DATA_DIR=\"$(abs_srcdir)/testdata\"
+AM_CPPFLAGS += -DTEST_DATA_BUILDDIR=\"$(abs_builddir)/testdata\"
+AM_CPPFLAGS += -DINSTALL_PROG=\"$(abs_top_srcdir)/install-sh\"
AM_CXXFLAGS = $(B10_CXXFLAGS)
@@ -25,11 +29,23 @@ run_unittests_SOURCES += query_unittest.cc
run_unittests_SOURCES += cache_unittest.cc
run_unittests_SOURCES += test_datasrc.h test_datasrc.cc
run_unittests_SOURCES += rbtree_unittest.cc
-run_unittests_SOURCES += zonetable_unittest.cc
-run_unittests_SOURCES += memory_datasrc_unittest.cc
+#run_unittests_SOURCES += zonetable_unittest.cc
+#run_unittests_SOURCES += memory_datasrc_unittest.cc
run_unittests_SOURCES += logger_unittest.cc
run_unittests_SOURCES += database_unittest.cc
+run_unittests_SOURCES += client_unittest.cc
run_unittests_SOURCES += sqlite3_accessor_unittest.cc
+if !USE_STATIC_LINK
+# This test uses a dynamically loadable module. It will cause various
+# troubles with static link such as "missing" symbols in the static object
+# for the module. As a workaround we disable this particular test
+# in this case.
+run_unittests_SOURCES += factory_unittest.cc
+endif
+# for the dlopened types we have tests for, we also need to include the
+# sources
+run_unittests_SOURCES += $(top_srcdir)/src/lib/datasrc/sqlite3_accessor.cc
+#run_unittests_SOURCES += $(top_srcdir)/src/lib/datasrc/memory_datasrc.cc
run_unittests_CPPFLAGS = $(AM_CPPFLAGS) $(GTEST_INCLUDES)
run_unittests_LDFLAGS = $(AM_LDFLAGS) $(GTEST_LDFLAGS)
@@ -60,3 +76,4 @@ EXTRA_DIST += testdata/sql1.example.com.signed
EXTRA_DIST += testdata/sql2.example.com.signed
EXTRA_DIST += testdata/test-root.sqlite3
EXTRA_DIST += testdata/test.sqlite3
+EXTRA_DIST += testdata/rwtest.sqlite3
diff --git a/src/lib/datasrc/tests/cache_unittest.cc b/src/lib/datasrc/tests/cache_unittest.cc
index 96beae0..1325f64 100644
--- a/src/lib/datasrc/tests/cache_unittest.cc
+++ b/src/lib/datasrc/tests/cache_unittest.cc
@@ -202,15 +202,15 @@ TEST_F(CacheTest, retrieveFail) {
}
TEST_F(CacheTest, expire) {
- // Insert "foo" with a duration of 2 seconds; sleep 3. The
+ // Insert "foo" with a duration of 1 second; sleep 2. The
// record should not be returned from the cache even though it's
// at the top of the cache.
RRsetPtr aaaa(new RRset(Name("foo"), RRClass::IN(), RRType::AAAA(),
RRTTL(0)));
aaaa->addRdata(in::AAAA("2001:db8:3:bb::5"));
- cache.addPositive(aaaa, 0, 2);
+ cache.addPositive(aaaa, 0, 1);
- sleep(3);
+ sleep(2);
RRsetPtr r;
uint32_t f;
diff --git a/src/lib/datasrc/tests/client_unittest.cc b/src/lib/datasrc/tests/client_unittest.cc
new file mode 100644
index 0000000..5b2c91a
--- /dev/null
+++ b/src/lib/datasrc/tests/client_unittest.cc
@@ -0,0 +1,50 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <datasrc/client.h>
+
+#include <dns/name.h>
+
+#include <gtest/gtest.h>
+
+using namespace isc::datasrc;
+using isc::dns::Name;
+
+namespace {
+
+/*
+ * The DataSourceClient can't be created as it has pure virtual methods.
+ * So we implement them as NOPs and test the other methods.
+ */
+class NopClient : public DataSourceClient {
+public:
+ virtual FindResult findZone(const isc::dns::Name&) const {
+ return (FindResult(result::NOTFOUND, ZoneFinderPtr()));
+ }
+ virtual ZoneUpdaterPtr getUpdater(const isc::dns::Name&, bool) const {
+ return (ZoneUpdaterPtr());
+ }
+};
+
+class ClientTest : public ::testing::Test {
+public:
+ NopClient client_;
+};
+
+// The default implementation is NotImplemented
+TEST_F(ClientTest, defaultIterator) {
+ EXPECT_THROW(client_.getIterator(Name(".")), isc::NotImplemented);
+}
+
+}
diff --git a/src/lib/datasrc/tests/database_unittest.cc b/src/lib/datasrc/tests/database_unittest.cc
index fcfcefe..fe57185 100644
--- a/src/lib/datasrc/tests/database_unittest.cc
+++ b/src/lib/datasrc/tests/database_unittest.cc
@@ -12,6 +12,8 @@
// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
// PERFORMANCE OF THIS SOFTWARE.
+#include <boost/foreach.hpp>
+
#include <gtest/gtest.h>
#include <dns/name.h>
@@ -22,6 +24,8 @@
#include <datasrc/database.h>
#include <datasrc/zone.h>
#include <datasrc/data_source.h>
+#include <datasrc/iterator.h>
+#include <datasrc/sqlite3_accessor.h>
#include <testutils/dnsmessage_test.h>
@@ -30,135 +34,624 @@
using namespace isc::datasrc;
using namespace std;
using namespace boost;
-using isc::dns::Name;
+using namespace isc::dns;
namespace {
+// Imaginary zone IDs used in the mock accessor below.
+const int READONLY_ZONE_ID = 42;
+const int WRITABLE_ZONE_ID = 4200;
+
+// Commonly used test data
+const char* const TEST_RECORDS[][5] = {
+ // some plain data
+ {"www.example.org.", "A", "3600", "", "192.0.2.1"},
+ {"www.example.org.", "AAAA", "3600", "", "2001:db8::1"},
+ {"www.example.org.", "AAAA", "3600", "", "2001:db8::2"},
+ {"www.example.org.", "NSEC", "3600", "", "www2.example.org. A AAAA NSEC RRSIG"},
+ {"www.example.org.", "RRSIG", "3600", "", "NSEC 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE"},
+
+ {"www2.example.org.", "A", "3600", "", "192.0.2.1"},
+ {"www2.example.org.", "AAAA", "3600", "", "2001:db8::1"},
+ {"www2.example.org.", "A", "3600", "", "192.0.2.2"},
+
+ {"cname.example.org.", "CNAME", "3600", "", "www.example.org."},
+
+ // some DNSSEC-'signed' data
+ {"signed1.example.org.", "A", "3600", "", "192.0.2.1"},
+ {"signed1.example.org.", "RRSIG", "3600", "", "A 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE"},
+
+ {"signed1.example.org.", "RRSIG", "3600", "", "A 5 3 3600 20000101000000 20000201000000 12346 example.org. FAKEFAKEFAKE"},
+ {"signed1.example.org.", "AAAA", "3600", "", "2001:db8::1"},
+ {"signed1.example.org.", "AAAA", "3600", "", "2001:db8::2"},
+ {"signed1.example.org.", "RRSIG", "3600", "", "AAAA 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE"},
+
+ {"signedcname1.example.org.", "CNAME", "3600", "", "www.example.org."},
+ {"signedcname1.example.org.", "RRSIG", "3600", "", "CNAME 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE"},
+
+ // special case might fail; sig is for cname, which isn't there (should be ignored)
+ // (ignoring of 'normal' other type is done above by www.)
+ {"acnamesig1.example.org.", "A", "3600", "", "192.0.2.1"},
+ {"acnamesig1.example.org.", "RRSIG", "3600", "", "A 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE"},
+ {"acnamesig1.example.org.", "RRSIG", "3600", "", "CNAME 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE"},
+
+ // let's pretend we have a database that is not careful
+ // about the order in which it returns data
+ {"signed2.example.org.", "RRSIG", "3600", "", "A 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE"},
+ {"signed2.example.org.", "AAAA", "3600", "", "2001:db8::2"},
+ {"signed2.example.org.", "RRSIG", "3600", "", "A 5 3 3600 20000101000000 20000201000000 12346 example.org. FAKEFAKEFAKE"},
+ {"signed2.example.org.", "A", "3600", "", "192.0.2.1"},
+ {"signed2.example.org.", "RRSIG", "3600", "", "AAAA 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE"},
+ {"signed2.example.org.", "AAAA", "3600", "", "2001:db8::1"},
+
+ {"signedcname2.example.org.", "RRSIG", "3600", "", "CNAME 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE"},
+ {"signedcname2.example.org.", "CNAME", "3600", "", "www.example.org."},
+
+ {"acnamesig2.example.org.", "RRSIG", "3600", "", "CNAME 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE"},
+ {"acnamesig2.example.org.", "A", "3600", "", "192.0.2.1"},
+ {"acnamesig2.example.org.", "RRSIG", "3600", "", "A 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE"},
+
+ {"acnamesig3.example.org.", "RRSIG", "3600", "", "CNAME 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE"},
+ {"acnamesig3.example.org.", "RRSIG", "3600", "", "A 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE"},
+ {"acnamesig3.example.org.", "A", "3600", "", "192.0.2.1"},
+
+ {"ttldiff1.example.org.", "A", "3600", "", "192.0.2.1"},
+ {"ttldiff1.example.org.", "A", "360", "", "192.0.2.2"},
+
+ {"ttldiff2.example.org.", "A", "360", "", "192.0.2.1"},
+ {"ttldiff2.example.org.", "A", "3600", "", "192.0.2.2"},
+
+ // also add some intentionally bad data
+ {"badcname1.example.org.", "A", "3600", "", "192.0.2.1"},
+ {"badcname1.example.org.", "CNAME", "3600", "", "www.example.org."},
+
+ {"badcname2.example.org.", "CNAME", "3600", "", "www.example.org."},
+ {"badcname2.example.org.", "A", "3600", "", "192.0.2.1"},
+
+ {"badcname3.example.org.", "CNAME", "3600", "", "www.example.org."},
+ {"badcname3.example.org.", "CNAME", "3600", "", "www.example2.org."},
+
+ {"badrdata.example.org.", "A", "3600", "", "bad"},
+
+ {"badtype.example.org.", "BAD_TYPE", "3600", "", "192.0.2.1"},
+
+ {"badttl.example.org.", "A", "badttl", "", "192.0.2.1"},
+
+ {"badsig.example.org.", "A", "badttl", "", "192.0.2.1"},
+ {"badsig.example.org.", "RRSIG", "3600", "", "A 5 3 3600 somebaddata 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE"},
+
+ {"badsigtype.example.org.", "A", "3600", "", "192.0.2.1"},
+ {"badsigtype.example.org.", "RRSIG", "3600", "TXT", "A 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE"},
+
+ // Data for testing delegation (with NS and DNAME)
+ {"delegation.example.org.", "NS", "3600", "", "ns.example.com."},
+ {"delegation.example.org.", "NS", "3600", "",
+ "ns.delegation.example.org."},
+ {"delegation.example.org.", "DS", "3600", "", "1 RSAMD5 2 abcd"},
+ {"delegation.example.org.", "RRSIG", "3600", "", "NS 5 3 3600 "
+ "20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE"},
+ {"ns.delegation.example.org.", "A", "3600", "", "192.0.2.1"},
+ {"deep.below.delegation.example.org.", "A", "3600", "", "192.0.2.1"},
+
+ {"dname.example.org.", "A", "3600", "", "192.0.2.1"},
+ {"dname.example.org.", "DNAME", "3600", "", "dname.example.com."},
+ {"dname.example.org.", "RRSIG", "3600", "",
+ "DNAME 5 3 3600 20000101000000 20000201000000 12345 "
+ "example.org. FAKEFAKEFAKE"},
+
+ {"below.dname.example.org.", "A", "3600", "", "192.0.2.1"},
+
+ // Broken NS
+ {"brokenns1.example.org.", "A", "3600", "", "192.0.2.1"},
+ {"brokenns1.example.org.", "NS", "3600", "", "ns.example.com."},
+
+ {"brokenns2.example.org.", "NS", "3600", "", "ns.example.com."},
+ {"brokenns2.example.org.", "A", "3600", "", "192.0.2.1"},
+
+ // Now double DNAME, to test failure mode
+ {"baddname.example.org.", "DNAME", "3600", "", "dname1.example.com."},
+ {"baddname.example.org.", "DNAME", "3600", "", "dname2.example.com."},
+
+ // Put some data into apex (including NS) so we can check our NS
+ // doesn't break anything
+ {"example.org.", "NS", "3600", "", "ns.example.com."},
+ {"example.org.", "A", "3600", "", "192.0.2.1"},
+ {"example.org.", "NSEC", "3600", "", "acnamesig1.example.org. NS A NSEC RRSIG"},
+ {"example.org.", "RRSIG", "3600", "", "NSEC 5 3 3600 20000101000000 "
+ "20000201000000 12345 example.org. FAKEFAKEFAKE"},
+ {"example.org.", "RRSIG", "3600", "", "NS 5 3 3600 20000101000000 "
+ "20000201000000 12345 example.org. FAKEFAKEFAKE"},
+
+ // This is because of empty domain test
+ {"a.b.example.org.", "A", "3600", "", "192.0.2.1"},
+
+ // Something for wildcards
+ {"*.wild.example.org.", "A", "3600", "", "192.0.2.5"},
+ {"*.wild.example.org.", "RRSIG", "3600", "A", "A 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE"},
+ {"*.wild.example.org.", "NSEC", "3600", "", "cancel.here.wild.example.org. A NSEC RRSIG"},
+ {"*.wild.example.org.", "RRSIG", "3600", "", "NSEC 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE"},
+ {"cancel.here.wild.example.org.", "AAAA", "3600", "", "2001:db8::5"},
+ {"delegatedwild.example.org.", "NS", "3600", "", "ns.example.com."},
+ {"*.delegatedwild.example.org.", "A", "3600", "", "192.0.2.5"},
+ {"wild.*.foo.example.org.", "A", "3600", "", "192.0.2.5"},
+ {"wild.*.foo.*.bar.example.org.", "A", "3600", "", "192.0.2.5"},
+ {"bao.example.org.", "NSEC", "3600", "", "wild.*.foo.*.bar.example.org. NSEC"},
+ {"*.cnamewild.example.org.", "CNAME", "3600", "", "www.example.org."},
+ {"*.nswild.example.org.", "NS", "3600", "", "ns.example.com."},
+ // For NSEC empty non-terminal
+ {"l.example.org.", "NSEC", "3600", "", "empty.nonterminal.example.org. NSEC"},
+ {"empty.nonterminal.example.org.", "A", "3600", "", "192.0.2.1"},
+ // Invalid rdata
+ {"invalidrdata.example.org.", "A", "3600", "", "Bunch of nonsense"},
+ {"invalidrdata2.example.org.", "A", "3600", "", "192.0.2.1"},
+ {"invalidrdata2.example.org.", "RRSIG", "3600", "", "Nonsense"},
+
+ {NULL, NULL, NULL, NULL, NULL},
+};
+
/*
- * A virtual database database that pretends it contains single zone --
+ * An accessor with minimum implementation, keeping the original
+ * "NotImplemented" methods.
+ */
+class NopAccessor : public DatabaseAccessor {
+public:
+ NopAccessor() : database_name_("mock_database")
+ { }
+
+ virtual std::pair<bool, int> getZone(const std::string& name) const {
+ if (name == "example.org.") {
+ return (std::pair<bool, int>(true, READONLY_ZONE_ID));
+ } else if (name == "null.example.org.") {
+ return (std::pair<bool, int>(true, 13));
+ } else if (name == "empty.example.org.") {
+ return (std::pair<bool, int>(true, 0));
+ } else if (name == "bad.example.org.") {
+ return (std::pair<bool, int>(true, -1));
+ } else {
+ return (std::pair<bool, int>(false, 0));
+ }
+ }
+
+ virtual shared_ptr<DatabaseAccessor> clone() {
+ return (shared_ptr<DatabaseAccessor>()); // bogus data, but unused
+ }
+
+ virtual std::pair<bool, int> startUpdateZone(const std::string&, bool) {
+ // return dummy value. unused anyway.
+ return (pair<bool, int>(true, 0));
+ }
+ virtual void commitUpdateZone() {}
+ virtual void rollbackUpdateZone() {}
+ virtual void addRecordToZone(const string (&)[ADD_COLUMN_COUNT]) {}
+ virtual void deleteRecordInZone(const string (&)[DEL_PARAM_COUNT]) {}
+
+ virtual const std::string& getDBName() const {
+ return (database_name_);
+ }
+
+ virtual IteratorContextPtr getRecords(const std::string&, int, bool)
+ const
+ {
+ isc_throw(isc::NotImplemented,
+ "This database datasource can't be iterated");
+ }
+
+ virtual IteratorContextPtr getAllRecords(int) const {
+ isc_throw(isc::NotImplemented,
+ "This database datasource can't be iterated");
+ }
+
+ virtual std::string findPreviousName(int, const std::string&) const {
+ isc_throw(isc::NotImplemented,
+ "This data source doesn't support DNSSEC");
+ }
+private:
+ const std::string database_name_;
+
+};
+
+/*
+ * A virtual database accessor that pretends it contains single zone --
* example.org.
+ *
+ * It has the same getZone method as NopAccessor, but it provides
+ * implementation of the optional functionality.
*/
-class MockAccessor : public DatabaseAccessor {
+class MockAccessor : public NopAccessor {
+ // Type of mock database "row"s
+ typedef std::map<std::string, std::vector< std::vector<std::string> > >
+ Domains;
+
public:
- MockAccessor() : search_running_(false),
- database_name_("mock_database")
- {
+ MockAccessor() : rollbacked_(false) {
+ readonly_records_ = &readonly_records_master_;
+ update_records_ = &update_records_master_;
+ empty_records_ = &empty_records_master_;
fillData();
}
- virtual std::pair<bool, int> getZone(const Name& name) const {
- if (name == Name("example.org")) {
- return (std::pair<bool, int>(true, 42));
- } else {
- return (std::pair<bool, int>(false, 0));
- }
+ virtual shared_ptr<DatabaseAccessor> clone() {
+ shared_ptr<MockAccessor> cloned_accessor(new MockAccessor());
+ cloned_accessor->readonly_records_ = &readonly_records_master_;
+ cloned_accessor->update_records_ = &update_records_master_;
+ cloned_accessor->empty_records_ = &empty_records_master_;
+ latest_clone_ = cloned_accessor;
+ return (cloned_accessor);
}
- virtual void searchForRecords(int zone_id, const std::string& name) {
- search_running_ = true;
+private:
+ class MockNameIteratorContext : public IteratorContext {
+ public:
+ MockNameIteratorContext(const MockAccessor& mock_accessor, int zone_id,
+ const std::string& name, bool subdomains) :
+ searched_name_(name), cur_record_(0)
+ {
+ // 'hardcoded' names to trigger exceptions
+ // On these names some exceptions are thrown, to test the robustness
+ // of the find() method.
+ if (searched_name_ == "dsexception.in.search.") {
+ isc_throw(DataSourceError, "datasource exception on search");
+ } else if (searched_name_ == "iscexception.in.search.") {
+ isc_throw(isc::Exception, "isc exception on search");
+ } else if (searched_name_ == "basicexception.in.search.") {
+ throw std::exception();
+ }
- // 'hardcoded' name to trigger exceptions (for testing
- // the error handling of find() (the other on is below in
- // if the name is "exceptiononsearch" it'll raise an exception here
- if (name == "dsexception.in.search.") {
- isc_throw(DataSourceError, "datasource exception on search");
- } else if (name == "iscexception.in.search.") {
- isc_throw(isc::Exception, "isc exception on search");
- } else if (name == "basicexception.in.search.") {
- throw std::exception();
- }
- searched_name_ = name;
-
- // we're not aiming for efficiency in this test, simply
- // copy the relevant vector from records
- cur_record = 0;
- if (zone_id == 42) {
- if (records.count(name) > 0) {
- cur_name = records.find(name)->second;
+ cur_record_ = 0;
+ const Domains& cur_records = mock_accessor.getMockRecords(zone_id);
+ if (cur_records.count(name) > 0) {
+ // we're not aiming for efficiency in this test, simply
+ // copy the relevant vector from records
+ cur_name = cur_records.find(name)->second;
+ } else if (subdomains) {
+ cur_name.clear();
+ // Just walk everything and check if it is a subdomain.
+ // If it is, just copy all data from there.
+ for (Domains::const_iterator i(cur_records.begin());
+ i != cur_records.end(); ++i) {
+ const Name local(i->first);
+ if (local.compare(Name(name)).getRelation() ==
+ isc::dns::NameComparisonResult::SUBDOMAIN) {
+ cur_name.insert(cur_name.end(), i->second.begin(),
+ i->second.end());
+ }
+ }
} else {
cur_name.clear();
}
- } else {
- cur_name.clear();
}
- };
- virtual bool getNextRecord(std::string columns[], size_t column_count) {
- if (searched_name_ == "dsexception.in.getnext.") {
- isc_throw(DataSourceError, "datasource exception on getnextrecord");
- } else if (searched_name_ == "iscexception.in.getnext.") {
- isc_throw(isc::Exception, "isc exception on getnextrecord");
- } else if (searched_name_ == "basicexception.in.getnext.") {
- throw std::exception();
- }
+ virtual bool getNext(std::string (&columns)[COLUMN_COUNT]) {
+ if (searched_name_ == "dsexception.in.getnext.") {
+ isc_throw(DataSourceError, "datasource exception on getnextrecord");
+ } else if (searched_name_ == "iscexception.in.getnext.") {
+ isc_throw(isc::Exception, "isc exception on getnextrecord");
+ } else if (searched_name_ == "basicexception.in.getnext.") {
+ throw std::exception();
+ }
- if (column_count != DatabaseAccessor::COLUMN_COUNT) {
- isc_throw(DataSourceError, "Wrong column count in getNextRecord");
+ if (cur_record_ < cur_name.size()) {
+ for (size_t i = 0; i < COLUMN_COUNT; ++i) {
+ columns[i] = cur_name[cur_record_][i];
+ }
+ cur_record_++;
+ return (true);
+ } else {
+ return (false);
+ }
}
- if (cur_record < cur_name.size()) {
- for (size_t i = 0; i < column_count; ++i) {
- columns[i] = cur_name[cur_record][i];
+
+ private:
+ const std::string searched_name_;
+ int cur_record_;
+ std::vector< std::vector<std::string> > cur_name;
+ };
+
+ class MockIteratorContext : public IteratorContext {
+ private:
+ int step;
+ public:
+ MockIteratorContext() :
+ step(0)
+ { }
+ virtual bool getNext(string (&data)[COLUMN_COUNT]) {
+ switch (step ++) {
+ case 0:
+ data[DatabaseAccessor::NAME_COLUMN] = "example.org";
+ data[DatabaseAccessor::TYPE_COLUMN] = "SOA";
+ data[DatabaseAccessor::TTL_COLUMN] = "300";
+ data[DatabaseAccessor::RDATA_COLUMN] = "ns1.example.org. admin.example.org. "
+ "1234 3600 1800 2419200 7200";
+ return (true);
+ case 1:
+ data[DatabaseAccessor::NAME_COLUMN] = "x.example.org";
+ data[DatabaseAccessor::TYPE_COLUMN] = "A";
+ data[DatabaseAccessor::TTL_COLUMN] = "300";
+ data[DatabaseAccessor::RDATA_COLUMN] = "192.0.2.1";
+ return (true);
+ case 2:
+ data[DatabaseAccessor::NAME_COLUMN] = "x.example.org";
+ data[DatabaseAccessor::TYPE_COLUMN] = "A";
+ data[DatabaseAccessor::TTL_COLUMN] = "300";
+ data[DatabaseAccessor::RDATA_COLUMN] = "192.0.2.2";
+ return (true);
+ case 3:
+ data[DatabaseAccessor::NAME_COLUMN] = "x.example.org";
+ data[DatabaseAccessor::TYPE_COLUMN] = "AAAA";
+ data[DatabaseAccessor::TTL_COLUMN] = "300";
+ data[DatabaseAccessor::RDATA_COLUMN] = "2001:db8::1";
+ return (true);
+ case 4:
+ data[DatabaseAccessor::NAME_COLUMN] = "x.example.org";
+ data[DatabaseAccessor::TYPE_COLUMN] = "AAAA";
+ data[DatabaseAccessor::TTL_COLUMN] = "300";
+ data[DatabaseAccessor::RDATA_COLUMN] = "2001:db8::2";
+ return (true);
+ default:
+ ADD_FAILURE() <<
+ "Request past the end of iterator context";
+ case 5:
+ return (false);
}
- cur_record++;
- return (true);
- } else {
- resetSearch();
+ }
+ };
+ class EmptyIteratorContext : public IteratorContext {
+ public:
+ virtual bool getNext(string(&)[COLUMN_COUNT]) {
return (false);
}
};
+ class BadIteratorContext : public IteratorContext {
+ private:
+ int step;
+ public:
+ BadIteratorContext() :
+ step(0)
+ { }
+ virtual bool getNext(string (&data)[COLUMN_COUNT]) {
+ switch (step ++) {
+ case 0:
+ data[DatabaseAccessor::NAME_COLUMN] = "x.example.org";
+ data[DatabaseAccessor::TYPE_COLUMN] = "A";
+ data[DatabaseAccessor::TTL_COLUMN] = "300";
+ data[DatabaseAccessor::RDATA_COLUMN] = "192.0.2.1";
+ return (true);
+ case 1:
+ data[DatabaseAccessor::NAME_COLUMN] = "x.example.org";
+ data[DatabaseAccessor::TYPE_COLUMN] = "A";
+ data[DatabaseAccessor::TTL_COLUMN] = "301";
+ data[DatabaseAccessor::RDATA_COLUMN] = "192.0.2.2";
+ return (true);
+ default:
+ ADD_FAILURE() <<
+ "Request past the end of iterator context";
+ case 2:
+ return (false);
+ }
+ }
+ };
+public:
+ virtual IteratorContextPtr getAllRecords(int id) const {
+ if (id == READONLY_ZONE_ID) {
+ return (IteratorContextPtr(new MockIteratorContext()));
+ } else if (id == 13) {
+ return (IteratorContextPtr());
+ } else if (id == 0) {
+ return (IteratorContextPtr(new EmptyIteratorContext()));
+ } else if (id == -1) {
+ return (IteratorContextPtr(new BadIteratorContext()));
+ } else {
+ isc_throw(isc::Unexpected, "Unknown zone ID");
+ }
+ }
+
+ virtual IteratorContextPtr getRecords(const std::string& name, int id,
+ bool subdomains) const
+ {
+ if (id == READONLY_ZONE_ID || id == WRITABLE_ZONE_ID) {
+ return (IteratorContextPtr(
+ new MockNameIteratorContext(*this, id, name,
+ subdomains)));
+ } else {
+ isc_throw(isc::Unexpected, "Unknown zone ID");
+ }
+ }
+
+ virtual pair<bool, int> startUpdateZone(const std::string& zone_name,
+ bool replace)
+ {
+ const pair<bool, int> zone_info = getZone(zone_name);
+ if (!zone_info.first) {
+ return (pair<bool, int>(false, 0));
+ }
+
+ // Prepare the record set for update. If replacing the existing one,
+ // we use an empty set; otherwise we use a writable copy of the
+ // original.
+ if (replace) {
+ update_records_->clear();
+ } else {
+ *update_records_ = *readonly_records_;
+ }
+
+ return (pair<bool, int>(true, WRITABLE_ZONE_ID));
+ }
+ virtual void commitUpdateZone() {
+ *readonly_records_ = *update_records_;
+ }
+ virtual void rollbackUpdateZone() {
+ // Special hook: if something with a name of "throw.example.org"
+ // has been added, trigger an imaginary unexpected event with an
+ // exception.
+ if (update_records_->count("throw.example.org.") > 0) {
+ isc_throw(DataSourceError, "unexpected failure in rollback");
+ }
+
+ rollbacked_ = true;
+ }
+ virtual void addRecordToZone(const string (&columns)[ADD_COLUMN_COUNT]) {
+ // Copy the current value to cur_name. If it doesn't exist,
+ // operator[] will create a new one.
+ cur_name_ = (*update_records_)[columns[DatabaseAccessor::ADD_NAME]];
+
+ vector<string> record_columns;
+ record_columns.push_back(columns[DatabaseAccessor::ADD_TYPE]);
+ record_columns.push_back(columns[DatabaseAccessor::ADD_TTL]);
+ record_columns.push_back(columns[DatabaseAccessor::ADD_SIGTYPE]);
+ record_columns.push_back(columns[DatabaseAccessor::ADD_RDATA]);
+ record_columns.push_back(columns[DatabaseAccessor::ADD_NAME]);
+
+ // copy back the added entry
+ cur_name_.push_back(record_columns);
+ (*update_records_)[columns[DatabaseAccessor::ADD_NAME]] = cur_name_;
+
+ // remember this one so that test cases can check it.
+ copy(columns, columns + DatabaseAccessor::ADD_COLUMN_COUNT,
+ columns_lastadded_);
+ }
- virtual void resetSearch() {
- search_running_ = false;
+ // Helper predicate class used in deleteRecordInZone().
+ struct deleteMatch {
+ deleteMatch(const string& type, const string& rdata) :
+ type_(type), rdata_(rdata)
+ {}
+ bool operator()(const vector<string>& row) const {
+ return (row[0] == type_ && row[3] == rdata_);
+ }
+ const string& type_;
+ const string& rdata_;
};
- bool searchRunning() const {
- return (search_running_);
+ virtual void deleteRecordInZone(const string (¶ms)[DEL_PARAM_COUNT]) {
+ vector<vector<string> >& records =
+ (*update_records_)[params[DatabaseAccessor::DEL_NAME]];
+ records.erase(remove_if(records.begin(), records.end(),
+ deleteMatch(
+ params[DatabaseAccessor::DEL_TYPE],
+ params[DatabaseAccessor::DEL_RDATA])),
+ records.end());
+ if (records.empty()) {
+ (*update_records_).erase(params[DatabaseAccessor::DEL_NAME]);
+ }
}
- virtual const std::string& getDBName() const {
- return (database_name_);
+ //
+ // Helper methods to keep track of some update related activities
+ //
+ bool isRollbacked() const {
+ return (rollbacked_);
}
+
+ const string* getLastAdded() const {
+ return (columns_lastadded_);
+ }
+
+ // This allows the test code to get the accessor used in an update context
+ shared_ptr<const MockAccessor> getLatestClone() const {
+ return (latest_clone_);
+ }
+
+ virtual std::string findPreviousName(int id, const std::string& rname)
+ const
+ {
+ // Hardcoded for now, but we could compute it from the data
+ // Maybe do it when it is needed some time in future?
+ if (id == -1) {
+ isc_throw(isc::NotImplemented, "Test not implemented behaviour");
+ } else if (id == 42) {
+ if (rname == "org.example.nonterminal.") {
+ return ("l.example.org.");
+ } else if (rname == "org.example.aa.") {
+ return ("example.org.");
+ } else if (rname == "org.example.www2." ||
+ rname == "org.example.www1.") {
+ return ("www.example.org.");
+ } else if (rname == "org.example.badnsec2.") {
+ return ("badnsec1.example.org.");
+ } else if (rname == "org.example.brokenname.") {
+ return ("brokenname...example.org.");
+ } else if (rname == "org.example.bar.*.") {
+ return ("bao.example.org.");
+ } else if (rname == "org.example.notimplnsec." ||
+ rname == "org.example.wild.here.") {
+ isc_throw(isc::NotImplemented, "Not implemented in this test");
+ } else {
+ isc_throw(isc::Unexpected, "Unexpected name");
+ }
+ } else {
+ isc_throw(isc::Unexpected, "Unknown zone ID");
+ }
+ }
+
private:
- std::map<std::string, std::vector< std::vector<std::string> > > records;
- // used as internal index for getNextRecord()
- size_t cur_record;
+ // The following member variables are storage and/or update work space
+ // of the test zone. The "master"s are the real objects that contain
+ // the data, and they are shared among all accessors cloned from
+ // an initially created one. The pointer members allow the sharing.
+ // "readonly" is for normal lookups. "update" is the workspace for
+ // updates. When update starts it will be initialized either as an
+ // empty set (when replacing the entire zone) or as a copy of the
+ // "readonly" one. "empty" is a sentinel to produce negative results.
+ Domains readonly_records_master_;
+ Domains* readonly_records_;
+ Domains update_records_master_;
+ Domains* update_records_;
+ const Domains empty_records_master_;
+ const Domains* empty_records_;
+
+ // used as temporary storage during the building of the fake data
+
// used as temporary storage after searchForRecord() and during
// getNextRecord() calls, as well as during the building of the
// fake data
- std::vector< std::vector<std::string> > cur_name;
+ std::vector< std::vector<std::string> > cur_name_;
- // This boolean is used to make sure find() calls resetSearch
- // when it encounters an error
- bool search_running_;
+ // The columns that were most recently added via addRecordToZone()
+ string columns_lastadded_[ADD_COLUMN_COUNT];
- // We store the name passed to searchForRecords, so we can
- // hardcode some exceptions into getNextRecord
- std::string searched_name_;
+ // Whether rollback operation has been performed for the database.
+ // Not useful except for purely testing purpose.
+ bool rollbacked_;
- const std::string database_name_;
+ // Remember the mock accessor that was last cloned
+ boost::shared_ptr<MockAccessor> latest_clone_;
+
+ const Domains& getMockRecords(int zone_id) const {
+ if (zone_id == READONLY_ZONE_ID) {
+ return (*readonly_records_);
+ } else if (zone_id == WRITABLE_ZONE_ID) {
+ return (*update_records_);
+ }
+ return (*empty_records_);
+ }
// Adds one record to the current name in the database
// The actual data will not be added to 'records' until
// addCurName() is called
- void addRecord(const std::string& name,
- const std::string& type,
+ void addRecord(const std::string& type,
+ const std::string& ttl,
const std::string& sigtype,
const std::string& rdata) {
std::vector<std::string> columns;
- columns.push_back(name);
columns.push_back(type);
+ columns.push_back(ttl);
columns.push_back(sigtype);
columns.push_back(rdata);
- cur_name.push_back(columns);
+ cur_name_.push_back(columns);
}
// Adds all records we just built with calls to addRecords
- // to the actual fake database. This will clear cur_name,
+ // to the actual fake database. This will clear cur_name_,
// so we can immediately start adding new records.
void addCurName(const std::string& name) {
- ASSERT_EQ(0, records.count(name));
- records[name] = cur_name;
- cur_name.clear();
+ ASSERT_EQ(0, readonly_records_->count(name));
+ // Append the name to all of them
+ for (std::vector<std::vector<std::string> >::iterator
+ i(cur_name_.begin()); i != cur_name_.end(); ++ i) {
+ i->push_back(name);
+ }
+ (*readonly_records_)[name] = cur_name_;
+ cur_name_.clear();
}
// Fills the database with zone data.
@@ -172,118 +665,78 @@ private:
// might not come in 'normal' order)
// It shall immediately fail if you try to add the same name twice.
void fillData() {
- // some plain data
- addRecord("A", "3600", "", "192.0.2.1");
- addRecord("AAAA", "3600", "", "2001:db8::1");
- addRecord("AAAA", "3600", "", "2001:db8::2");
- addCurName("www.example.org.");
-
- addRecord("A", "3600", "", "192.0.2.1");
- addRecord("AAAA", "3600", "", "2001:db8::1");
- addRecord("A", "3600", "", "192.0.2.2");
- addCurName("www2.example.org.");
-
- addRecord("CNAME", "3600", "", "www.example.org.");
- addCurName("cname.example.org.");
-
- // some DNSSEC-'signed' data
- addRecord("A", "3600", "", "192.0.2.1");
- addRecord("RRSIG", "3600", "", "A 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE");
- addRecord("RRSIG", "3600", "", "A 5 3 3600 20000101000000 20000201000000 12346 example.org. FAKEFAKEFAKE");
- addRecord("AAAA", "3600", "", "2001:db8::1");
- addRecord("AAAA", "3600", "", "2001:db8::2");
- addRecord("RRSIG", "3600", "", "AAAA 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE");
- addCurName("signed1.example.org.");
- addRecord("CNAME", "3600", "", "www.example.org.");
- addRecord("RRSIG", "3600", "", "CNAME 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE");
- addCurName("signedcname1.example.org.");
- // special case might fail; sig is for cname, which isn't there (should be ignored)
- // (ignoring of 'normal' other type is done above by www.)
- addRecord("A", "3600", "", "192.0.2.1");
- addRecord("RRSIG", "3600", "", "A 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE");
- addRecord("RRSIG", "3600", "", "CNAME 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE");
- addCurName("acnamesig1.example.org.");
-
- // let's pretend we have a database that is not careful
- // about the order in which it returns data
- addRecord("RRSIG", "3600", "", "A 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE");
- addRecord("AAAA", "3600", "", "2001:db8::2");
- addRecord("RRSIG", "3600", "", "A 5 3 3600 20000101000000 20000201000000 12346 example.org. FAKEFAKEFAKE");
- addRecord("A", "3600", "", "192.0.2.1");
- addRecord("RRSIG", "3600", "", "AAAA 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE");
- addRecord("AAAA", "3600", "", "2001:db8::1");
- addCurName("signed2.example.org.");
- addRecord("RRSIG", "3600", "", "CNAME 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE");
- addRecord("CNAME", "3600", "", "www.example.org.");
- addCurName("signedcname2.example.org.");
-
- addRecord("RRSIG", "3600", "", "CNAME 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE");
- addRecord("A", "3600", "", "192.0.2.1");
- addRecord("RRSIG", "3600", "", "A 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE");
- addCurName("acnamesig2.example.org.");
-
- addRecord("RRSIG", "3600", "", "CNAME 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE");
- addRecord("RRSIG", "3600", "", "A 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE");
- addRecord("A", "3600", "", "192.0.2.1");
- addCurName("acnamesig3.example.org.");
-
- addRecord("A", "3600", "", "192.0.2.1");
- addRecord("A", "360", "", "192.0.2.2");
- addCurName("ttldiff1.example.org.");
- addRecord("A", "360", "", "192.0.2.1");
- addRecord("A", "3600", "", "192.0.2.2");
- addCurName("ttldiff2.example.org.");
-
- // also add some intentionally bad data
- addRecord("A", "3600", "", "192.0.2.1");
- addRecord("CNAME", "3600", "", "www.example.org.");
- addCurName("badcname1.example.org.");
-
- addRecord("CNAME", "3600", "", "www.example.org.");
- addRecord("A", "3600", "", "192.0.2.1");
- addCurName("badcname2.example.org.");
-
- addRecord("CNAME", "3600", "", "www.example.org.");
- addRecord("CNAME", "3600", "", "www.example2.org.");
- addCurName("badcname3.example.org.");
-
- addRecord("A", "3600", "", "bad");
- addCurName("badrdata.example.org.");
-
- addRecord("BAD_TYPE", "3600", "", "192.0.2.1");
- addCurName("badtype.example.org.");
-
- addRecord("A", "badttl", "", "192.0.2.1");
- addCurName("badttl.example.org.");
-
- addRecord("A", "badttl", "", "192.0.2.1");
- addRecord("RRSIG", "3600", "", "A 5 3 3600 somebaddata 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE");
- addCurName("badsig.example.org.");
-
- addRecord("A", "3600", "", "192.0.2.1");
- addRecord("RRSIG", "3600", "TXT", "A 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE");
- addCurName("badsigtype.example.org.");
+ const char* prev_name = NULL;
+ for (int i = 0; TEST_RECORDS[i][0] != NULL; ++i) {
+ if (prev_name != NULL &&
+ strcmp(prev_name, TEST_RECORDS[i][0]) != 0) {
+ addCurName(prev_name);
+ }
+ prev_name = TEST_RECORDS[i][0];
+ addRecord(TEST_RECORDS[i][1], TEST_RECORDS[i][2],
+ TEST_RECORDS[i][3], TEST_RECORDS[i][4]);
+ }
+ addCurName(prev_name);
}
};
+// This tests the default getRecords behaviour, throwing NotImplemented
+TEST(DatabaseConnectionTest, getRecords) {
+ EXPECT_THROW(NopAccessor().getRecords(".", 1, false),
+ isc::NotImplemented);
+}
+
+// This tests the default getAllRecords behaviour, throwing NotImplemented
+TEST(DatabaseConnectionTest, getAllRecords) {
+ // The parameters don't matter
+ EXPECT_THROW(NopAccessor().getAllRecords(1),
+ isc::NotImplemented);
+}
+
+// This test fixture is templated so that we can share (most of) the test
+// cases with different types of data sources. Note that in test cases
+// we need to use 'this' to refer to member variables of the test class.
+template <typename ACCESSOR_TYPE>
class DatabaseClientTest : public ::testing::Test {
public:
- DatabaseClientTest() {
+ DatabaseClientTest() : zname_("example.org"), qname_("www.example.org"),
+ qclass_(RRClass::IN()), qtype_(RRType::A()),
+ rrttl_(3600)
+ {
createClient();
+
+ // set up the commonly used finder.
+ DataSourceClient::FindResult zone(client_->findZone(zname_));
+ assert(zone.code == result::SUCCESS);
+ finder_ = dynamic_pointer_cast<DatabaseClient::Finder>(
+ zone.zone_finder);
+
+ // Test IN/A RDATA to be added in update tests. Intentionally using
+ // different data than the initial data configured in the MockAccessor.
+ rrset_.reset(new RRset(qname_, qclass_, qtype_, rrttl_));
+ rrset_->addRdata(rdata::createRdata(rrset_->getType(),
+ rrset_->getClass(), "192.0.2.2"));
+
+ // And its RRSIG. Also different from the configured one.
+ rrsigset_.reset(new RRset(qname_, qclass_, RRType::RRSIG(),
+ rrttl_));
+ rrsigset_->addRdata(rdata::createRdata(rrsigset_->getType(),
+ rrsigset_->getClass(),
+ "A 5 3 0 20000101000000 "
+ "20000201000000 0 example.org. "
+ "FAKEFAKEFAKE"));
}
+
/*
* We initialize the client from a function, so we can call it multiple
* times per test.
*/
void createClient() {
- current_database_ = new MockAccessor();
- client_.reset(new DatabaseClient(shared_ptr<DatabaseAccessor>(
- current_database_)));
+ current_accessor_ = new ACCESSOR_TYPE();
+ is_mock_ = (dynamic_cast<MockAccessor*>(current_accessor_) != NULL);
+ client_.reset(new DatabaseClient(qclass_,
+ shared_ptr<ACCESSOR_TYPE>(
+ current_accessor_)));
}
- // Will be deleted by client_, just keep the current value for comparison.
- MockAccessor* current_database_;
- shared_ptr<DatabaseClient> client_;
- const std::string database_name_;
/**
* Check the zone finder is a valid one and references the zone ID and
@@ -295,37 +748,254 @@ public:
dynamic_pointer_cast<DatabaseClient::Finder>(zone.zone_finder));
ASSERT_NE(shared_ptr<DatabaseClient::Finder>(), finder) <<
"Wrong type of finder";
- EXPECT_EQ(42, finder->zone_id());
- EXPECT_EQ(current_database_, &finder->database());
+ if (is_mock_) {
+ EXPECT_EQ(READONLY_ZONE_ID, finder->zone_id());
+ }
+ EXPECT_EQ(current_accessor_, &finder->getAccessor());
+ }
+
+ shared_ptr<DatabaseClient::Finder> getFinder() {
+ DataSourceClient::FindResult zone(client_->findZone(zname_));
+ EXPECT_EQ(result::SUCCESS, zone.code);
+ shared_ptr<DatabaseClient::Finder> finder(
+ dynamic_pointer_cast<DatabaseClient::Finder>(zone.zone_finder));
+ if (is_mock_) {
+ EXPECT_EQ(READONLY_ZONE_ID, finder->zone_id());
+ }
+
+ return (finder);
}
+
+ // Helper methods for update tests
+ bool isRollbacked(bool expected = false) const {
+ if (is_mock_) {
+ const MockAccessor& mock_accessor =
+ dynamic_cast<const MockAccessor&>(*update_accessor_);
+ return (mock_accessor.isRollbacked());
+ } else {
+ return (expected);
+ }
+ }
+
+ void checkLastAdded(const char* const expected[]) const {
+ if (is_mock_) {
+ const MockAccessor* mock_accessor =
+ dynamic_cast<const MockAccessor*>(current_accessor_);
+ for (int i = 0; i < DatabaseAccessor::ADD_COLUMN_COUNT; ++i) {
+ EXPECT_EQ(expected[i],
+ mock_accessor->getLatestClone()->getLastAdded()[i]);
+ }
+ }
+ }
+
+ void setUpdateAccessor() {
+ if (is_mock_) {
+ const MockAccessor* mock_accessor =
+ dynamic_cast<const MockAccessor*>(current_accessor_);
+ update_accessor_ = mock_accessor->getLatestClone();
+ }
+ }
+
+ // Some tests only work for MockAccessor. We remember whether our accessor
+ // is of that type.
+ bool is_mock_;
+
+ // Will be deleted by client_, just keep the current value for comparison.
+ ACCESSOR_TYPE* current_accessor_;
+ shared_ptr<DatabaseClient> client_;
+ const std::string database_name_;
+
+ // The zone finder of the test zone commonly used in various tests.
+ shared_ptr<DatabaseClient::Finder> finder_;
+
+ // Some shortcut variables for commonly used test parameters
+ const Name zname_; // the zone name stored in the test data source
+ const Name qname_; // commonly used name to be found
+ const RRClass qclass_; // commonly used RR class used with qname
+ const RRType qtype_; // commonly used RR type used with qname
+ const RRTTL rrttl_; // commonly used RR TTL
+ RRsetPtr rrset_; // for adding/deleting an RRset
+ RRsetPtr rrsigset_; // for adding/deleting an RRset
+
+ // update related objects to be tested
+ ZoneUpdaterPtr updater_;
+ shared_ptr<const DatabaseAccessor> update_accessor_;
+
+ // placeholders
+ const std::vector<std::string> empty_rdatas_; // for NXRRSET/NXDOMAIN
+ std::vector<std::string> expected_rdatas_;
+ std::vector<std::string> expected_sig_rdatas_;
};
-TEST_F(DatabaseClientTest, zoneNotFound) {
- DataSourceClient::FindResult zone(client_->findZone(Name("example.com")));
+class TestSQLite3Accessor : public SQLite3Accessor {
+public:
+ TestSQLite3Accessor() : SQLite3Accessor(
+ TEST_DATA_BUILDDIR "/rwtest.sqlite3.copied",
+ RRClass::IN())
+ {
+ startUpdateZone("example.org.", true);
+ string columns[ADD_COLUMN_COUNT];
+ for (int i = 0; TEST_RECORDS[i][0] != NULL; ++i) {
+ columns[ADD_NAME] = TEST_RECORDS[i][0];
+ columns[ADD_REV_NAME] = Name(columns[ADD_NAME]).reverse().toText();
+ columns[ADD_TYPE] = TEST_RECORDS[i][1];
+ columns[ADD_TTL] = TEST_RECORDS[i][2];
+ columns[ADD_SIGTYPE] = TEST_RECORDS[i][3];
+ columns[ADD_RDATA] = TEST_RECORDS[i][4];
+
+ addRecordToZone(columns);
+ }
+ commitUpdateZone();
+ }
+};
+
+// The following two lines instantiate test cases with concrete accessor
+// classes to be tested.
+// XXX: clang++ installed on our FreeBSD buildbot cannot complete compiling
+// this file, seemingly due to the size of the code. We'll consider more
+// complete workaround, but for a short term workaround we'll reduce the
+// number of tested accessor classes (thus reducing the amount of code
+// to be compiled) for this particular environment.
+#if defined(__clang__) && defined(__FreeBSD__)
+typedef ::testing::Types<MockAccessor> TestAccessorTypes;
+#else
+typedef ::testing::Types<MockAccessor, TestSQLite3Accessor> TestAccessorTypes;
+#endif
+
+TYPED_TEST_CASE(DatabaseClientTest, TestAccessorTypes);
+
+// In some cases the entire test fixture is for the mock accessor only.
+// We use the usual TEST_F for them with the corresponding specialized class
+// to make the code simpler.
+typedef DatabaseClientTest<MockAccessor> MockDatabaseClientTest;
+
+TYPED_TEST(DatabaseClientTest, zoneNotFound) {
+ DataSourceClient::FindResult zone(
+ this->client_->findZone(Name("example.com")));
EXPECT_EQ(result::NOTFOUND, zone.code);
}
-TEST_F(DatabaseClientTest, exactZone) {
- DataSourceClient::FindResult zone(client_->findZone(Name("example.org")));
+TYPED_TEST(DatabaseClientTest, exactZone) {
+ DataSourceClient::FindResult zone(
+ this->client_->findZone(Name("example.org")));
EXPECT_EQ(result::SUCCESS, zone.code);
- checkZoneFinder(zone);
+ this->checkZoneFinder(zone);
}
-TEST_F(DatabaseClientTest, superZone) {
- DataSourceClient::FindResult zone(client_->findZone(Name(
+TYPED_TEST(DatabaseClientTest, superZone) {
+ DataSourceClient::FindResult zone(this->client_->findZone(Name(
"sub.example.org")));
EXPECT_EQ(result::PARTIALMATCH, zone.code);
- checkZoneFinder(zone);
+ this->checkZoneFinder(zone);
}
-TEST_F(DatabaseClientTest, noAccessorException) {
+// This test doesn't depend on derived accessor class, so we use TEST().
+TEST(GenericDatabaseClientTest, noAccessorException) {
// We need a dummy variable here; some compiler would regard it a mere
// declaration instead of an instantiation and make the test fail.
- EXPECT_THROW(DatabaseClient dummy((shared_ptr<DatabaseAccessor>())),
+ EXPECT_THROW(DatabaseClient dummy(RRClass::IN(),
+ shared_ptr<DatabaseAccessor>()),
isc::InvalidParameter);
}
-namespace {
+// If the zone doesn't exist, exception is thrown
+TYPED_TEST(DatabaseClientTest, noZoneIterator) {
+ EXPECT_THROW(this->client_->getIterator(Name("example.com")),
+ DataSourceError);
+}
+
+// If the zone doesn't exist and iteration is not implemented, it still throws
+// the exception it doesn't exist
+TEST(GenericDatabaseClientTest, noZoneNotImplementedIterator) {
+ EXPECT_THROW(DatabaseClient(RRClass::IN(),
+ boost::shared_ptr<DatabaseAccessor>(
+ new NopAccessor())).getIterator(
+ Name("example.com")),
+ DataSourceError);
+}
+
+TEST(GenericDatabaseClientTest, notImplementedIterator) {
+ EXPECT_THROW(DatabaseClient(RRClass::IN(), shared_ptr<DatabaseAccessor>(
+ new NopAccessor())).getIterator(Name("example.org")),
+ isc::NotImplemented);
+}
+
+// Pretend a bug in the connection and pass NULL as the context
+// Should not crash, but gracefully throw. Works for the mock accessor only.
+TEST_F(MockDatabaseClientTest, nullIteratorContext) {
+ EXPECT_THROW(this->client_->getIterator(Name("null.example.org")),
+ isc::Unexpected);
+}
+
+// It doesn't crash or anything if the zone is completely empty.
+// Works for the mock accessor only.
+TEST_F(MockDatabaseClientTest, emptyIterator) {
+ ZoneIteratorPtr it(this->client_->getIterator(Name("empty.example.org")));
+ EXPECT_EQ(ConstRRsetPtr(), it->getNextRRset());
+ // This is past the end, it should throw
+ EXPECT_THROW(it->getNextRRset(), isc::Unexpected);
+}
+
+// Iterate through a zone
+TYPED_TEST(DatabaseClientTest, iterator) {
+ ZoneIteratorPtr it(this->client_->getIterator(Name("example.org")));
+ ConstRRsetPtr rrset(it->getNextRRset());
+ ASSERT_NE(ConstRRsetPtr(), rrset);
+
+ // The rest of the checks work only for the mock accessor.
+ if (!this->is_mock_) {
+ return;
+ }
+
+ EXPECT_EQ(Name("example.org"), rrset->getName());
+ EXPECT_EQ(RRClass::IN(), rrset->getClass());
+ EXPECT_EQ(RRType::SOA(), rrset->getType());
+ EXPECT_EQ(RRTTL(300), rrset->getTTL());
+ RdataIteratorPtr rit(rrset->getRdataIterator());
+ ASSERT_FALSE(rit->isLast());
+ rit->next();
+ EXPECT_TRUE(rit->isLast());
+
+ rrset = it->getNextRRset();
+ ASSERT_NE(ConstRRsetPtr(), rrset);
+ EXPECT_EQ(Name("x.example.org"), rrset->getName());
+ EXPECT_EQ(RRClass::IN(), rrset->getClass());
+ EXPECT_EQ(RRType::A(), rrset->getType());
+ EXPECT_EQ(RRTTL(300), rrset->getTTL());
+ rit = rrset->getRdataIterator();
+ ASSERT_FALSE(rit->isLast());
+ EXPECT_EQ("192.0.2.1", rit->getCurrent().toText());
+ rit->next();
+ ASSERT_FALSE(rit->isLast());
+ EXPECT_EQ("192.0.2.2", rit->getCurrent().toText());
+ rit->next();
+ EXPECT_TRUE(rit->isLast());
+
+ rrset = it->getNextRRset();
+ ASSERT_NE(ConstRRsetPtr(), rrset);
+ EXPECT_EQ(Name("x.example.org"), rrset->getName());
+ EXPECT_EQ(RRClass::IN(), rrset->getClass());
+ EXPECT_EQ(RRType::AAAA(), rrset->getType());
+ EXPECT_EQ(RRTTL(300), rrset->getTTL());
+ EXPECT_EQ(ConstRRsetPtr(), it->getNextRRset());
+ rit = rrset->getRdataIterator();
+ ASSERT_FALSE(rit->isLast());
+ EXPECT_EQ("2001:db8::1", rit->getCurrent().toText());
+ rit->next();
+ ASSERT_FALSE(rit->isLast());
+ EXPECT_EQ("2001:db8::2", rit->getCurrent().toText());
+ rit->next();
+ EXPECT_TRUE(rit->isLast());
+}
+
+// This has inconsistent TTL in the set (the rest, like nonsense in
+// the data is handled in rdata itself). Works for the mock accessor only.
+TEST_F(MockDatabaseClientTest, badIterator) {
+ // It should not throw, but get the lowest one of them
+ ZoneIteratorPtr it(this->client_->getIterator(Name("bad.example.org")));
+ EXPECT_EQ(it->getNextRRset()->getTTL(), isc::dns::RRTTL(300));
+}
+
// checks if the given rrset matches the
// given name, class, type and rdatas
void
@@ -346,342 +1016,1395 @@ checkRRset(isc::dns::ConstRRsetPtr rrset,
}
void
-doFindTest(shared_ptr<DatabaseClient::Finder> finder,
+doFindTest(ZoneFinder& finder,
const isc::dns::Name& name,
const isc::dns::RRType& type,
const isc::dns::RRType& expected_type,
const isc::dns::RRTTL expected_ttl,
ZoneFinder::Result expected_result,
const std::vector<std::string>& expected_rdatas,
- const std::vector<std::string>& expected_sig_rdatas)
+ const std::vector<std::string>& expected_sig_rdatas,
+ const isc::dns::Name& expected_name = isc::dns::Name::ROOT_NAME(),
+ const ZoneFinder::FindOptions options = ZoneFinder::FIND_DEFAULT)
{
+ SCOPED_TRACE("doFindTest " + name.toText() + " " + type.toText());
ZoneFinder::FindResult result =
- finder->find(name, type, NULL, ZoneFinder::FIND_DEFAULT);
+ finder.find(name, type, NULL, options);
ASSERT_EQ(expected_result, result.code) << name << " " << type;
- if (expected_rdatas.size() > 0) {
- checkRRset(result.rrset, name, finder->getClass(),
- expected_type, expected_ttl, expected_rdatas);
-
- if (expected_sig_rdatas.size() > 0) {
- checkRRset(result.rrset->getRRsig(), name,
- finder->getClass(), isc::dns::RRType::RRSIG(),
- expected_ttl, expected_sig_rdatas);
- } else {
+ if (!expected_rdatas.empty() && result.rrset) {
+ checkRRset(result.rrset, expected_name != Name(".") ? expected_name :
+ name, finder.getClass(), expected_type, expected_ttl,
+ expected_rdatas);
+
+ if (!expected_sig_rdatas.empty() && result.rrset->getRRsig()) {
+ checkRRset(result.rrset->getRRsig(), expected_name != Name(".") ?
+ expected_name : name, finder.getClass(),
+ isc::dns::RRType::RRSIG(), expected_ttl,
+ expected_sig_rdatas);
+ } else if (expected_sig_rdatas.empty()) {
EXPECT_EQ(isc::dns::RRsetPtr(), result.rrset->getRRsig());
+ } else {
+ ADD_FAILURE() << "Missing RRSIG";
}
- } else {
+ } else if (expected_rdatas.empty()) {
EXPECT_EQ(isc::dns::RRsetPtr(), result.rrset);
+ } else {
+ ADD_FAILURE() << "Missing result";
}
}
-} // end anonymous namespace
-TEST_F(DatabaseClientTest, find) {
- DataSourceClient::FindResult zone(client_->findZone(Name("example.org")));
- ASSERT_EQ(result::SUCCESS, zone.code);
- shared_ptr<DatabaseClient::Finder> finder(
- dynamic_pointer_cast<DatabaseClient::Finder>(zone.zone_finder));
- EXPECT_EQ(42, finder->zone_id());
- EXPECT_FALSE(current_database_->searchRunning());
- std::vector<std::string> expected_rdatas;
- std::vector<std::string> expected_sig_rdatas;
-
- expected_rdatas.clear();
- expected_sig_rdatas.clear();
- expected_rdatas.push_back("192.0.2.1");
- doFindTest(finder, isc::dns::Name("www.example.org."),
- isc::dns::RRType::A(), isc::dns::RRType::A(),
- isc::dns::RRTTL(3600),
- ZoneFinder::SUCCESS,
- expected_rdatas, expected_sig_rdatas);
- EXPECT_FALSE(current_database_->searchRunning());
-
- expected_rdatas.clear();
- expected_sig_rdatas.clear();
- expected_rdatas.push_back("192.0.2.1");
- expected_rdatas.push_back("192.0.2.2");
- doFindTest(finder, isc::dns::Name("www2.example.org."),
- isc::dns::RRType::A(), isc::dns::RRType::A(),
- isc::dns::RRTTL(3600),
- ZoneFinder::SUCCESS,
- expected_rdatas, expected_sig_rdatas);
- EXPECT_FALSE(current_database_->searchRunning());
-
- expected_rdatas.clear();
- expected_sig_rdatas.clear();
- expected_rdatas.push_back("2001:db8::1");
- expected_rdatas.push_back("2001:db8::2");
- doFindTest(finder, isc::dns::Name("www.example.org."),
+TYPED_TEST(DatabaseClientTest, find) {
+ shared_ptr<DatabaseClient::Finder> finder(this->getFinder());
+
+ this->expected_rdatas_.clear();
+ this->expected_sig_rdatas_.clear();
+ this->expected_rdatas_.push_back("192.0.2.1");
+ doFindTest(*finder, isc::dns::Name("www.example.org."),
+ this->qtype_, this->qtype_, this->rrttl_, ZoneFinder::SUCCESS,
+ this->expected_rdatas_, this->expected_sig_rdatas_);
+
+ this->expected_rdatas_.clear();
+ this->expected_sig_rdatas_.clear();
+ this->expected_rdatas_.push_back("192.0.2.1");
+ this->expected_rdatas_.push_back("192.0.2.2");
+ doFindTest(*finder, isc::dns::Name("www2.example.org."),
+ this->qtype_, this->qtype_, this->rrttl_, ZoneFinder::SUCCESS,
+ this->expected_rdatas_, this->expected_sig_rdatas_);
+
+ this->expected_rdatas_.clear();
+ this->expected_sig_rdatas_.clear();
+ this->expected_rdatas_.push_back("2001:db8::1");
+ this->expected_rdatas_.push_back("2001:db8::2");
+ doFindTest(*finder, isc::dns::Name("www.example.org."),
isc::dns::RRType::AAAA(), isc::dns::RRType::AAAA(),
- isc::dns::RRTTL(3600),
+ this->rrttl_,
ZoneFinder::SUCCESS,
- expected_rdatas, expected_sig_rdatas);
- EXPECT_FALSE(current_database_->searchRunning());
+ this->expected_rdatas_, this->expected_sig_rdatas_);
- expected_rdatas.clear();
- expected_sig_rdatas.clear();
- doFindTest(finder, isc::dns::Name("www.example.org."),
+ this->expected_rdatas_.clear();
+ this->expected_sig_rdatas_.clear();
+ doFindTest(*finder, isc::dns::Name("www.example.org."),
isc::dns::RRType::TXT(), isc::dns::RRType::TXT(),
- isc::dns::RRTTL(3600),
+ this->rrttl_,
ZoneFinder::NXRRSET,
- expected_rdatas, expected_sig_rdatas);
- EXPECT_FALSE(current_database_->searchRunning());
-
- expected_rdatas.clear();
- expected_sig_rdatas.clear();
- expected_rdatas.push_back("www.example.org.");
- doFindTest(finder, isc::dns::Name("cname.example.org."),
- isc::dns::RRType::A(), isc::dns::RRType::CNAME(),
- isc::dns::RRTTL(3600),
- ZoneFinder::CNAME,
- expected_rdatas, expected_sig_rdatas);
- EXPECT_FALSE(current_database_->searchRunning());
-
- expected_rdatas.clear();
- expected_sig_rdatas.clear();
- expected_rdatas.push_back("www.example.org.");
- doFindTest(finder, isc::dns::Name("cname.example.org."),
+ this->expected_rdatas_, this->expected_sig_rdatas_);
+
+ this->expected_rdatas_.clear();
+ this->expected_sig_rdatas_.clear();
+ this->expected_rdatas_.push_back("www.example.org.");
+ doFindTest(*finder, isc::dns::Name("cname.example.org."),
+ this->qtype_, isc::dns::RRType::CNAME(), this->rrttl_,
+ ZoneFinder::CNAME, this->expected_rdatas_,
+ this->expected_sig_rdatas_);
+
+ this->expected_rdatas_.clear();
+ this->expected_sig_rdatas_.clear();
+ this->expected_rdatas_.push_back("www.example.org.");
+ doFindTest(*finder, isc::dns::Name("cname.example.org."),
isc::dns::RRType::CNAME(), isc::dns::RRType::CNAME(),
- isc::dns::RRTTL(3600),
- ZoneFinder::SUCCESS,
- expected_rdatas, expected_sig_rdatas);
- EXPECT_FALSE(current_database_->searchRunning());
-
- expected_rdatas.clear();
- expected_sig_rdatas.clear();
- doFindTest(finder, isc::dns::Name("doesnotexist.example.org."),
- isc::dns::RRType::A(), isc::dns::RRType::A(),
- isc::dns::RRTTL(3600),
- ZoneFinder::NXDOMAIN,
- expected_rdatas, expected_sig_rdatas);
- EXPECT_FALSE(current_database_->searchRunning());
-
- expected_rdatas.clear();
- expected_sig_rdatas.clear();
- expected_rdatas.push_back("192.0.2.1");
- expected_sig_rdatas.push_back("A 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE");
- expected_sig_rdatas.push_back("A 5 3 3600 20000101000000 20000201000000 12346 example.org. FAKEFAKEFAKE");
- doFindTest(finder, isc::dns::Name("signed1.example.org."),
- isc::dns::RRType::A(), isc::dns::RRType::A(),
- isc::dns::RRTTL(3600),
- ZoneFinder::SUCCESS,
- expected_rdatas, expected_sig_rdatas);
- EXPECT_FALSE(current_database_->searchRunning());
-
- expected_rdatas.clear();
- expected_sig_rdatas.clear();
- expected_rdatas.push_back("2001:db8::1");
- expected_rdatas.push_back("2001:db8::2");
- expected_sig_rdatas.push_back("AAAA 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE");
- doFindTest(finder, isc::dns::Name("signed1.example.org."),
+ this->rrttl_, ZoneFinder::SUCCESS, this->expected_rdatas_,
+ this->expected_sig_rdatas_);
+
+ this->expected_rdatas_.clear();
+ this->expected_sig_rdatas_.clear();
+ doFindTest(*finder, isc::dns::Name("doesnotexist.example.org."),
+ this->qtype_, this->qtype_, this->rrttl_, ZoneFinder::NXDOMAIN,
+ this->expected_rdatas_, this->expected_sig_rdatas_);
+
+ this->expected_rdatas_.clear();
+ this->expected_sig_rdatas_.clear();
+ this->expected_rdatas_.push_back("192.0.2.1");
+ this->expected_sig_rdatas_.push_back("A 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE");
+ this->expected_sig_rdatas_.push_back("A 5 3 3600 20000101000000 20000201000000 12346 example.org. FAKEFAKEFAKE");
+ doFindTest(*finder, isc::dns::Name("signed1.example.org."),
+ this->qtype_, this->qtype_, this->rrttl_, ZoneFinder::SUCCESS,
+ this->expected_rdatas_, this->expected_sig_rdatas_);
+
+ this->expected_rdatas_.clear();
+ this->expected_sig_rdatas_.clear();
+ this->expected_rdatas_.push_back("2001:db8::1");
+ this->expected_rdatas_.push_back("2001:db8::2");
+ this->expected_sig_rdatas_.push_back("AAAA 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE");
+ doFindTest(*finder, isc::dns::Name("signed1.example.org."),
isc::dns::RRType::AAAA(), isc::dns::RRType::AAAA(),
- isc::dns::RRTTL(3600),
- ZoneFinder::SUCCESS,
- expected_rdatas, expected_sig_rdatas);
- EXPECT_FALSE(current_database_->searchRunning());
-
- expected_rdatas.clear();
- expected_sig_rdatas.clear();
- doFindTest(finder, isc::dns::Name("signed1.example.org."),
- isc::dns::RRType::TXT(), isc::dns::RRType::TXT(),
- isc::dns::RRTTL(3600),
- ZoneFinder::NXRRSET,
- expected_rdatas, expected_sig_rdatas);
- EXPECT_FALSE(current_database_->searchRunning());
-
- expected_rdatas.clear();
- expected_sig_rdatas.clear();
- expected_rdatas.push_back("www.example.org.");
- expected_sig_rdatas.push_back("CNAME 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE");
- doFindTest(finder, isc::dns::Name("signedcname1.example.org."),
- isc::dns::RRType::A(), isc::dns::RRType::CNAME(),
- isc::dns::RRTTL(3600),
- ZoneFinder::CNAME,
- expected_rdatas, expected_sig_rdatas);
- EXPECT_FALSE(current_database_->searchRunning());
-
- expected_rdatas.clear();
- expected_sig_rdatas.clear();
- expected_rdatas.push_back("192.0.2.1");
- expected_sig_rdatas.push_back("A 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE");
- expected_sig_rdatas.push_back("A 5 3 3600 20000101000000 20000201000000 12346 example.org. FAKEFAKEFAKE");
- doFindTest(finder, isc::dns::Name("signed2.example.org."),
- isc::dns::RRType::A(), isc::dns::RRType::A(),
- isc::dns::RRTTL(3600),
- ZoneFinder::SUCCESS,
- expected_rdatas, expected_sig_rdatas);
- EXPECT_FALSE(current_database_->searchRunning());
-
- expected_rdatas.clear();
- expected_sig_rdatas.clear();
- expected_rdatas.push_back("2001:db8::2");
- expected_rdatas.push_back("2001:db8::1");
- expected_sig_rdatas.push_back("AAAA 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE");
- doFindTest(finder, isc::dns::Name("signed2.example.org."),
+ this->rrttl_, ZoneFinder::SUCCESS, this->expected_rdatas_,
+ this->expected_sig_rdatas_);
+
+ this->expected_rdatas_.clear();
+ this->expected_sig_rdatas_.clear();
+ doFindTest(*finder, isc::dns::Name("signed1.example.org."),
+ isc::dns::RRType::TXT(), isc::dns::RRType::TXT(), this->rrttl_,
+ ZoneFinder::NXRRSET, this->expected_rdatas_,
+ this->expected_sig_rdatas_);
+
+ this->expected_rdatas_.clear();
+ this->expected_sig_rdatas_.clear();
+ this->expected_rdatas_.push_back("www.example.org.");
+ this->expected_sig_rdatas_.push_back("CNAME 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE");
+ doFindTest(*finder, isc::dns::Name("signedcname1.example.org."),
+ this->qtype_, isc::dns::RRType::CNAME(), this->rrttl_,
+ ZoneFinder::CNAME, this->expected_rdatas_,
+ this->expected_sig_rdatas_);
+
+ this->expected_rdatas_.clear();
+ this->expected_sig_rdatas_.clear();
+ this->expected_rdatas_.push_back("192.0.2.1");
+ this->expected_sig_rdatas_.push_back("A 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE");
+ this->expected_sig_rdatas_.push_back("A 5 3 3600 20000101000000 20000201000000 12346 example.org. FAKEFAKEFAKE");
+ doFindTest(*finder, isc::dns::Name("signed2.example.org."),
+ this->qtype_, this->qtype_, this->rrttl_, ZoneFinder::SUCCESS,
+ this->expected_rdatas_, this->expected_sig_rdatas_);
+
+ this->expected_rdatas_.clear();
+ this->expected_sig_rdatas_.clear();
+ this->expected_rdatas_.push_back("2001:db8::2");
+ this->expected_rdatas_.push_back("2001:db8::1");
+ this->expected_sig_rdatas_.push_back("AAAA 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE");
+ doFindTest(*finder, isc::dns::Name("signed2.example.org."),
isc::dns::RRType::AAAA(), isc::dns::RRType::AAAA(),
- isc::dns::RRTTL(3600),
- ZoneFinder::SUCCESS,
- expected_rdatas, expected_sig_rdatas);
- EXPECT_FALSE(current_database_->searchRunning());
-
- expected_rdatas.clear();
- expected_sig_rdatas.clear();
- doFindTest(finder, isc::dns::Name("signed2.example.org."),
- isc::dns::RRType::TXT(), isc::dns::RRType::TXT(),
- isc::dns::RRTTL(3600),
- ZoneFinder::NXRRSET,
- expected_rdatas, expected_sig_rdatas);
- EXPECT_FALSE(current_database_->searchRunning());
-
- expected_rdatas.clear();
- expected_sig_rdatas.clear();
- expected_rdatas.push_back("www.example.org.");
- expected_sig_rdatas.push_back("CNAME 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE");
- doFindTest(finder, isc::dns::Name("signedcname2.example.org."),
- isc::dns::RRType::A(), isc::dns::RRType::CNAME(),
- isc::dns::RRTTL(3600),
- ZoneFinder::CNAME,
- expected_rdatas, expected_sig_rdatas);
- EXPECT_FALSE(current_database_->searchRunning());
-
-
- expected_rdatas.clear();
- expected_sig_rdatas.clear();
- expected_rdatas.push_back("192.0.2.1");
- expected_sig_rdatas.push_back("A 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE");
- doFindTest(finder, isc::dns::Name("acnamesig1.example.org."),
- isc::dns::RRType::A(), isc::dns::RRType::A(),
- isc::dns::RRTTL(3600),
- ZoneFinder::SUCCESS,
- expected_rdatas, expected_sig_rdatas);
- EXPECT_FALSE(current_database_->searchRunning());
-
- expected_rdatas.clear();
- expected_sig_rdatas.clear();
- expected_rdatas.push_back("192.0.2.1");
- expected_sig_rdatas.push_back("A 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE");
- doFindTest(finder, isc::dns::Name("acnamesig2.example.org."),
- isc::dns::RRType::A(), isc::dns::RRType::A(),
- isc::dns::RRTTL(3600),
- ZoneFinder::SUCCESS,
- expected_rdatas, expected_sig_rdatas);
- EXPECT_FALSE(current_database_->searchRunning());
-
- expected_rdatas.clear();
- expected_sig_rdatas.clear();
- expected_rdatas.push_back("192.0.2.1");
- expected_sig_rdatas.push_back("A 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE");
- doFindTest(finder, isc::dns::Name("acnamesig3.example.org."),
- isc::dns::RRType::A(), isc::dns::RRType::A(),
- isc::dns::RRTTL(3600),
- ZoneFinder::SUCCESS,
- expected_rdatas, expected_sig_rdatas);
- EXPECT_FALSE(current_database_->searchRunning());
-
- expected_rdatas.clear();
- expected_sig_rdatas.clear();
- expected_rdatas.push_back("192.0.2.1");
- expected_rdatas.push_back("192.0.2.2");
- doFindTest(finder, isc::dns::Name("ttldiff1.example.org."),
- isc::dns::RRType::A(), isc::dns::RRType::A(),
- isc::dns::RRTTL(360),
- ZoneFinder::SUCCESS,
- expected_rdatas, expected_sig_rdatas);
- EXPECT_FALSE(current_database_->searchRunning());
-
- expected_rdatas.clear();
- expected_sig_rdatas.clear();
- expected_rdatas.push_back("192.0.2.1");
- expected_rdatas.push_back("192.0.2.2");
- doFindTest(finder, isc::dns::Name("ttldiff2.example.org."),
- isc::dns::RRType::A(), isc::dns::RRType::A(),
- isc::dns::RRTTL(360),
- ZoneFinder::SUCCESS,
- expected_rdatas, expected_sig_rdatas);
- EXPECT_FALSE(current_database_->searchRunning());
-
+ this->rrttl_, ZoneFinder::SUCCESS, this->expected_rdatas_,
+ this->expected_sig_rdatas_);
+
+ this->expected_rdatas_.clear();
+ this->expected_sig_rdatas_.clear();
+ doFindTest(*finder, isc::dns::Name("signed2.example.org."),
+ isc::dns::RRType::TXT(), isc::dns::RRType::TXT(), this->rrttl_,
+ ZoneFinder::NXRRSET, this->expected_rdatas_,
+ this->expected_sig_rdatas_);
+
+ this->expected_rdatas_.clear();
+ this->expected_sig_rdatas_.clear();
+ this->expected_rdatas_.push_back("www.example.org.");
+ this->expected_sig_rdatas_.push_back("CNAME 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE");
+ doFindTest(*finder, isc::dns::Name("signedcname2.example.org."),
+ this->qtype_, isc::dns::RRType::CNAME(), this->rrttl_,
+ ZoneFinder::CNAME, this->expected_rdatas_,
+ this->expected_sig_rdatas_);
+
+ this->expected_rdatas_.clear();
+ this->expected_sig_rdatas_.clear();
+ this->expected_rdatas_.push_back("192.0.2.1");
+ this->expected_sig_rdatas_.push_back("A 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE");
+ doFindTest(*finder, isc::dns::Name("acnamesig1.example.org."),
+ this->qtype_, this->qtype_, this->rrttl_, ZoneFinder::SUCCESS,
+ this->expected_rdatas_, this->expected_sig_rdatas_);
+
+ this->expected_rdatas_.clear();
+ this->expected_sig_rdatas_.clear();
+ this->expected_rdatas_.push_back("192.0.2.1");
+ this->expected_sig_rdatas_.push_back("A 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE");
+ doFindTest(*finder, isc::dns::Name("acnamesig2.example.org."),
+ this->qtype_, this->qtype_, this->rrttl_, ZoneFinder::SUCCESS,
+ this->expected_rdatas_, this->expected_sig_rdatas_);
+
+ this->expected_rdatas_.clear();
+ this->expected_sig_rdatas_.clear();
+ this->expected_rdatas_.push_back("192.0.2.1");
+ this->expected_sig_rdatas_.push_back("A 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE");
+ doFindTest(*finder, isc::dns::Name("acnamesig3.example.org."),
+ this->qtype_, this->qtype_, this->rrttl_, ZoneFinder::SUCCESS,
+ this->expected_rdatas_, this->expected_sig_rdatas_);
+
+ this->expected_rdatas_.clear();
+ this->expected_sig_rdatas_.clear();
+ this->expected_rdatas_.push_back("192.0.2.1");
+ this->expected_rdatas_.push_back("192.0.2.2");
+ doFindTest(*finder, isc::dns::Name("ttldiff1.example.org."),
+ this->qtype_, this->qtype_, isc::dns::RRTTL(360),
+ ZoneFinder::SUCCESS, this->expected_rdatas_,
+ this->expected_sig_rdatas_);
+
+ this->expected_rdatas_.clear();
+ this->expected_sig_rdatas_.clear();
+ this->expected_rdatas_.push_back("192.0.2.1");
+ this->expected_rdatas_.push_back("192.0.2.2");
+ doFindTest(*finder, isc::dns::Name("ttldiff2.example.org."),
+ this->qtype_, this->qtype_, isc::dns::RRTTL(360),
+ ZoneFinder::SUCCESS, this->expected_rdatas_,
+ this->expected_sig_rdatas_);
EXPECT_THROW(finder->find(isc::dns::Name("badcname1.example.org."),
- isc::dns::RRType::A(),
+ this->qtype_,
NULL, ZoneFinder::FIND_DEFAULT),
DataSourceError);
- EXPECT_FALSE(current_database_->searchRunning());
EXPECT_THROW(finder->find(isc::dns::Name("badcname2.example.org."),
- isc::dns::RRType::A(),
+ this->qtype_,
NULL, ZoneFinder::FIND_DEFAULT),
DataSourceError);
- EXPECT_FALSE(current_database_->searchRunning());
EXPECT_THROW(finder->find(isc::dns::Name("badcname3.example.org."),
- isc::dns::RRType::A(),
+ this->qtype_,
NULL, ZoneFinder::FIND_DEFAULT),
DataSourceError);
- EXPECT_FALSE(current_database_->searchRunning());
EXPECT_THROW(finder->find(isc::dns::Name("badrdata.example.org."),
- isc::dns::RRType::A(),
+ this->qtype_,
NULL, ZoneFinder::FIND_DEFAULT),
DataSourceError);
- EXPECT_FALSE(current_database_->searchRunning());
EXPECT_THROW(finder->find(isc::dns::Name("badtype.example.org."),
- isc::dns::RRType::A(),
+ this->qtype_,
NULL, ZoneFinder::FIND_DEFAULT),
DataSourceError);
- EXPECT_FALSE(current_database_->searchRunning());
EXPECT_THROW(finder->find(isc::dns::Name("badttl.example.org."),
- isc::dns::RRType::A(),
+ this->qtype_,
NULL, ZoneFinder::FIND_DEFAULT),
DataSourceError);
- EXPECT_FALSE(current_database_->searchRunning());
EXPECT_THROW(finder->find(isc::dns::Name("badsig.example.org."),
- isc::dns::RRType::A(),
+ this->qtype_,
NULL, ZoneFinder::FIND_DEFAULT),
DataSourceError);
- EXPECT_FALSE(current_database_->searchRunning());
// Trigger the hardcoded exceptions and see if find() has cleaned up
- EXPECT_THROW(finder->find(isc::dns::Name("dsexception.in.search."),
- isc::dns::RRType::A(),
- NULL, ZoneFinder::FIND_DEFAULT),
+ if (this->is_mock_) {
+ EXPECT_THROW(finder->find(isc::dns::Name("dsexception.in.search."),
+ this->qtype_,
+ NULL, ZoneFinder::FIND_DEFAULT),
+ DataSourceError);
+ EXPECT_THROW(finder->find(isc::dns::Name("iscexception.in.search."),
+ this->qtype_,
+ NULL, ZoneFinder::FIND_DEFAULT),
+ isc::Exception);
+ EXPECT_THROW(finder->find(isc::dns::Name("basicexception.in.search."),
+ this->qtype_,
+ NULL, ZoneFinder::FIND_DEFAULT),
+ std::exception);
+ EXPECT_THROW(finder->find(isc::dns::Name("dsexception.in.getnext."),
+ this->qtype_,
+ NULL, ZoneFinder::FIND_DEFAULT),
+ DataSourceError);
+ EXPECT_THROW(finder->find(isc::dns::Name("iscexception.in.getnext."),
+ this->qtype_,
+ NULL, ZoneFinder::FIND_DEFAULT),
+ isc::Exception);
+ EXPECT_THROW(finder->find(isc::dns::Name("basicexception.in.getnext."),
+ this->qtype_,
+ NULL, ZoneFinder::FIND_DEFAULT),
+ std::exception);
+ }
+
+ // This RRSIG has the wrong sigtype field, which should be
+ // an error if we decide to keep using that field
+ // Right now the field is ignored, so it does not error
+ this->expected_rdatas_.clear();
+ this->expected_sig_rdatas_.clear();
+ this->expected_rdatas_.push_back("192.0.2.1");
+ this->expected_sig_rdatas_.push_back("A 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE");
+ doFindTest(*finder, isc::dns::Name("badsigtype.example.org."),
+ this->qtype_, this->qtype_, this->rrttl_, ZoneFinder::SUCCESS,
+ this->expected_rdatas_, this->expected_sig_rdatas_);
+}
+
+TYPED_TEST(DatabaseClientTest, findDelegation) {
+ shared_ptr<DatabaseClient::Finder> finder(this->getFinder());
+
+ // The apex should not be considered delegation point and we can access
+ // data
+ this->expected_rdatas_.clear();
+ this->expected_sig_rdatas_.clear();
+ this->expected_rdatas_.push_back("192.0.2.1");
+ doFindTest(*finder, isc::dns::Name("example.org."),
+ this->qtype_, this->qtype_,
+ this->rrttl_, ZoneFinder::SUCCESS, this->expected_rdatas_,
+ this->expected_sig_rdatas_);
+
+ this->expected_rdatas_.clear();
+ this->expected_rdatas_.push_back("ns.example.com.");
+ this->expected_sig_rdatas_.push_back("NS 5 3 3600 20000101000000 20000201000000 "
+ "12345 example.org. FAKEFAKEFAKE");
+ doFindTest(*finder, isc::dns::Name("example.org."),
+ isc::dns::RRType::NS(), isc::dns::RRType::NS(),
+ this->rrttl_, ZoneFinder::SUCCESS, this->expected_rdatas_,
+ this->expected_sig_rdatas_);
+
+ // Check when we ask for something below delegation point, we get the NS
+ // (Both when the RRset there exists and doesn't)
+ this->expected_rdatas_.clear();
+ this->expected_sig_rdatas_.clear();
+ this->expected_rdatas_.push_back("ns.example.com.");
+ this->expected_rdatas_.push_back("ns.delegation.example.org.");
+ this->expected_sig_rdatas_.push_back("NS 5 3 3600 20000101000000 20000201000000 "
+ "12345 example.org. FAKEFAKEFAKE");
+ doFindTest(*finder, isc::dns::Name("ns.delegation.example.org."),
+ this->qtype_, isc::dns::RRType::NS(),
+ this->rrttl_, ZoneFinder::DELEGATION, this->expected_rdatas_,
+ this->expected_sig_rdatas_,
+ isc::dns::Name("delegation.example.org."));
+ doFindTest(*finder, isc::dns::Name("ns.delegation.example.org."),
+ isc::dns::RRType::AAAA(), isc::dns::RRType::NS(),
+ this->rrttl_, ZoneFinder::DELEGATION, this->expected_rdatas_,
+ this->expected_sig_rdatas_,
+ isc::dns::Name("delegation.example.org."));
+ doFindTest(*finder, isc::dns::Name("deep.below.delegation.example.org."),
+ isc::dns::RRType::AAAA(), isc::dns::RRType::NS(),
+ this->rrttl_, ZoneFinder::DELEGATION, this->expected_rdatas_,
+ this->expected_sig_rdatas_,
+ isc::dns::Name("delegation.example.org."));
+
+ // Even when we check directly at the delegation point, we should get
+ // the NS
+ doFindTest(*finder, isc::dns::Name("delegation.example.org."),
+ isc::dns::RRType::AAAA(), isc::dns::RRType::NS(),
+ this->rrttl_, ZoneFinder::DELEGATION, this->expected_rdatas_,
+ this->expected_sig_rdatas_);
+
+ // And when we ask direcly for the NS, we should still get delegation
+ doFindTest(*finder, isc::dns::Name("delegation.example.org."),
+ isc::dns::RRType::NS(), isc::dns::RRType::NS(),
+ this->rrttl_, ZoneFinder::DELEGATION, this->expected_rdatas_,
+ this->expected_sig_rdatas_);
+
+ // Now test delegation. If it is below the delegation point, we should get
+ // the DNAME (the one with data under DNAME is invalid zone, but we test
+ // the behaviour anyway just to make sure)
+ this->expected_rdatas_.clear();
+ this->expected_rdatas_.push_back("dname.example.com.");
+ this->expected_sig_rdatas_.clear();
+ this->expected_sig_rdatas_.push_back("DNAME 5 3 3600 20000101000000 "
+ "20000201000000 12345 example.org. "
+ "FAKEFAKEFAKE");
+ doFindTest(*finder, isc::dns::Name("below.dname.example.org."),
+ this->qtype_, isc::dns::RRType::DNAME(),
+ this->rrttl_, ZoneFinder::DNAME, this->expected_rdatas_,
+ this->expected_sig_rdatas_, isc::dns::Name("dname.example.org."));
+ doFindTest(*finder, isc::dns::Name("below.dname.example.org."),
+ isc::dns::RRType::AAAA(), isc::dns::RRType::DNAME(),
+ this->rrttl_, ZoneFinder::DNAME, this->expected_rdatas_,
+ this->expected_sig_rdatas_, isc::dns::Name("dname.example.org."));
+ doFindTest(*finder, isc::dns::Name("really.deep.below.dname.example.org."),
+ isc::dns::RRType::AAAA(), isc::dns::RRType::DNAME(),
+ this->rrttl_, ZoneFinder::DNAME, this->expected_rdatas_,
+ this->expected_sig_rdatas_, isc::dns::Name("dname.example.org."));
+
+ // Asking direcly for DNAME should give SUCCESS
+ doFindTest(*finder, isc::dns::Name("dname.example.org."),
+ isc::dns::RRType::DNAME(), isc::dns::RRType::DNAME(),
+ this->rrttl_, ZoneFinder::SUCCESS, this->expected_rdatas_,
+ this->expected_sig_rdatas_);
+
+ // But we don't delegate at DNAME point
+ this->expected_rdatas_.clear();
+ this->expected_rdatas_.push_back("192.0.2.1");
+ this->expected_sig_rdatas_.clear();
+ doFindTest(*finder, isc::dns::Name("dname.example.org."),
+ this->qtype_, this->qtype_,
+ this->rrttl_, ZoneFinder::SUCCESS, this->expected_rdatas_,
+ this->expected_sig_rdatas_);
+ this->expected_rdatas_.clear();
+ doFindTest(*finder, isc::dns::Name("dname.example.org."),
+ isc::dns::RRType::AAAA(), isc::dns::RRType::AAAA(),
+ this->rrttl_, ZoneFinder::NXRRSET, this->expected_rdatas_,
+ this->expected_sig_rdatas_);
+
+ // This is broken dname, it contains two targets
+ EXPECT_THROW(finder->find(isc::dns::Name("below.baddname.example.org."),
+ this->qtype_, NULL,
+ ZoneFinder::FIND_DEFAULT),
DataSourceError);
- EXPECT_FALSE(current_database_->searchRunning());
- EXPECT_THROW(finder->find(isc::dns::Name("iscexception.in.search."),
- isc::dns::RRType::A(),
- NULL, ZoneFinder::FIND_DEFAULT),
+
+ // Broken NS - it lives together with something else
+ EXPECT_THROW(finder->find(isc::dns::Name("brokenns1.example.org."),
+ this->qtype_, NULL,
+ ZoneFinder::FIND_DEFAULT),
DataSourceError);
- EXPECT_FALSE(current_database_->searchRunning());
- EXPECT_THROW(finder->find(isc::dns::Name("basicexception.in.search."),
- isc::dns::RRType::A(),
- NULL, ZoneFinder::FIND_DEFAULT),
- std::exception);
- EXPECT_FALSE(current_database_->searchRunning());
+ EXPECT_THROW(finder->find(isc::dns::Name("brokenns2.example.org."),
+ this->qtype_, NULL,
+ ZoneFinder::FIND_DEFAULT),
+ DataSourceError);
+}
- EXPECT_THROW(finder->find(isc::dns::Name("dsexception.in.getnext."),
- isc::dns::RRType::A(),
- NULL, ZoneFinder::FIND_DEFAULT),
+TYPED_TEST(DatabaseClientTest, emptyDomain) {
+ shared_ptr<DatabaseClient::Finder> finder(this->getFinder());
+
+ // This domain doesn't exist, but a subdomain of it does.
+ // Therefore we should pretend the domain is there, but contains no RRsets
+ doFindTest(*finder, isc::dns::Name("b.example.org."), this->qtype_,
+ this->qtype_, this->rrttl_, ZoneFinder::NXRRSET,
+ this->expected_rdatas_, this->expected_sig_rdatas_);
+}
+
+// Glue-OK mode. Just go through NS delegations down (but not through
+// DNAME) and pretend it is not there.
+TYPED_TEST(DatabaseClientTest, glueOK) {
+ shared_ptr<DatabaseClient::Finder> finder(this->getFinder());
+
+ this->expected_rdatas_.clear();
+ this->expected_sig_rdatas_.clear();
+ doFindTest(*finder, isc::dns::Name("ns.delegation.example.org."),
+ isc::dns::RRType::AAAA(), isc::dns::RRType::AAAA(),
+ this->rrttl_, ZoneFinder::NXRRSET,
+ this->expected_rdatas_, this->expected_sig_rdatas_,
+ isc::dns::Name("ns.delegation.example.org."),
+ ZoneFinder::FIND_GLUE_OK);
+ doFindTest(*finder, isc::dns::Name("nothere.delegation.example.org."),
+ isc::dns::RRType::AAAA(), isc::dns::RRType::AAAA(),
+ this->rrttl_, ZoneFinder::NXDOMAIN,
+ this->expected_rdatas_, this->expected_sig_rdatas_,
+ isc::dns::Name("nothere.delegation.example.org."),
+ ZoneFinder::FIND_GLUE_OK);
+ this->expected_rdatas_.push_back("192.0.2.1");
+ doFindTest(*finder, isc::dns::Name("ns.delegation.example.org."),
+ this->qtype_, this->qtype_,
+ this->rrttl_, ZoneFinder::SUCCESS,
+ this->expected_rdatas_, this->expected_sig_rdatas_,
+ isc::dns::Name("ns.delegation.example.org."),
+ ZoneFinder::FIND_GLUE_OK);
+ this->expected_rdatas_.clear();
+ this->expected_rdatas_.push_back("ns.example.com.");
+ this->expected_rdatas_.push_back("ns.delegation.example.org.");
+ this->expected_sig_rdatas_.clear();
+ this->expected_sig_rdatas_.push_back("NS 5 3 3600 20000101000000 "
+ "20000201000000 12345 example.org. "
+ "FAKEFAKEFAKE");
+ // When we request the NS, it should be SUCCESS, not DELEGATION
+ // (different in GLUE_OK)
+ doFindTest(*finder, isc::dns::Name("delegation.example.org."),
+ isc::dns::RRType::NS(), isc::dns::RRType::NS(),
+ this->rrttl_, ZoneFinder::SUCCESS,
+ this->expected_rdatas_, this->expected_sig_rdatas_,
+ isc::dns::Name("delegation.example.org."),
+ ZoneFinder::FIND_GLUE_OK);
+ this->expected_rdatas_.clear();
+ this->expected_rdatas_.push_back("dname.example.com.");
+ this->expected_sig_rdatas_.clear();
+ this->expected_sig_rdatas_.push_back("DNAME 5 3 3600 20000101000000 "
+ "20000201000000 12345 example.org. "
+ "FAKEFAKEFAKE");
+ doFindTest(*finder, isc::dns::Name("below.dname.example.org."),
+ this->qtype_, isc::dns::RRType::DNAME(),
+ this->rrttl_, ZoneFinder::DNAME, this->expected_rdatas_,
+ this->expected_sig_rdatas_,
+ isc::dns::Name("dname.example.org."), ZoneFinder::FIND_GLUE_OK);
+ doFindTest(*finder, isc::dns::Name("below.dname.example.org."),
+ isc::dns::RRType::AAAA(), isc::dns::RRType::DNAME(),
+ this->rrttl_, ZoneFinder::DNAME, this->expected_rdatas_,
+ this->expected_sig_rdatas_,
+ isc::dns::Name("dname.example.org."), ZoneFinder::FIND_GLUE_OK);
+}
+
+TYPED_TEST(DatabaseClientTest, wildcard) {
+ shared_ptr<DatabaseClient::Finder> finder(this->getFinder());
+
+ // First, simple wildcard match
+ // Check also that the RRSIG is added from the wildcard (not modified)
+ this->expected_rdatas_.push_back("192.0.2.5");
+ this->expected_sig_rdatas_.push_back("A 5 3 3600 20000101000000 "
+ "20000201000000 12345 example.org. "
+ "FAKEFAKEFAKE");
+ doFindTest(*finder, isc::dns::Name("a.wild.example.org"),
+ this->qtype_, this->qtype_, this->rrttl_,
+ ZoneFinder::WILDCARD, this->expected_rdatas_,
+ this->expected_sig_rdatas_);
+ doFindTest(*finder, isc::dns::Name("b.a.wild.example.org"),
+ this->qtype_, this->qtype_, this->rrttl_, ZoneFinder::WILDCARD,
+ this->expected_rdatas_, this->expected_sig_rdatas_);
+ this->expected_rdatas_.clear();
+ this->expected_sig_rdatas_.clear();
+ doFindTest(*finder, isc::dns::Name("a.wild.example.org"),
+ isc::dns::RRType::AAAA(), isc::dns::RRType::AAAA(),
+ this->rrttl_, ZoneFinder::WILDCARD_NXRRSET,
+ this->expected_rdatas_, this->expected_sig_rdatas_);
+ doFindTest(*finder, isc::dns::Name("b.a.wild.example.org"),
+ isc::dns::RRType::AAAA(), isc::dns::RRType::AAAA(),
+ this->rrttl_, ZoneFinder::WILDCARD_NXRRSET,
+ this->expected_rdatas_, this->expected_sig_rdatas_);
+
+ // Direct request for this wildcard
+ this->expected_rdatas_.push_back("192.0.2.5");
+ this->expected_sig_rdatas_.push_back("A 5 3 3600 20000101000000 "
+ "20000201000000 12345 example.org. "
+ "FAKEFAKEFAKE");
+ doFindTest(*finder, isc::dns::Name("*.wild.example.org"),
+ this->qtype_, this->qtype_, this->rrttl_, ZoneFinder::SUCCESS,
+ this->expected_rdatas_, this->expected_sig_rdatas_);
+ this->expected_rdatas_.clear();
+ this->expected_sig_rdatas_.clear();
+ doFindTest(*finder, isc::dns::Name("*.wild.example.org"),
+ isc::dns::RRType::AAAA(), isc::dns::RRType::AAAA(),
+ this->rrttl_, ZoneFinder::NXRRSET, this->expected_rdatas_,
+ this->expected_sig_rdatas_);
+ // This is nonsense, but check it doesn't match by some stupid accident
+ doFindTest(*finder, isc::dns::Name("a.*.wild.example.org"),
+ this->qtype_, this->qtype_, this->rrttl_, ZoneFinder::NXDOMAIN,
+ this->expected_rdatas_, this->expected_sig_rdatas_);
+ // These should be canceled, since it is below a domain which exitsts
+ doFindTest(*finder, isc::dns::Name("nothing.here.wild.example.org"),
+ this->qtype_, this->qtype_, this->rrttl_, ZoneFinder::NXDOMAIN,
+ this->expected_rdatas_, this->expected_sig_rdatas_);
+ doFindTest(*finder, isc::dns::Name("cancel.here.wild.example.org"),
+ this->qtype_, this->qtype_, this->rrttl_, ZoneFinder::NXRRSET,
+ this->expected_rdatas_, this->expected_sig_rdatas_);
+ doFindTest(*finder,
+ isc::dns::Name("below.cancel.here.wild.example.org"),
+ this->qtype_, this->qtype_, this->rrttl_, ZoneFinder::NXDOMAIN,
+ this->expected_rdatas_, this->expected_sig_rdatas_);
+ // And this should be just plain empty non-terminal domain, check
+ // the wildcard doesn't hurt it
+ doFindTest(*finder, isc::dns::Name("here.wild.example.org"),
+ this->qtype_, this->qtype_, this->rrttl_, ZoneFinder::NXRRSET,
+ this->expected_rdatas_, this->expected_sig_rdatas_);
+ // Also make sure that the wildcard doesn't hurt the original data
+ // below the wildcard
+ this->expected_rdatas_.push_back("2001:db8::5");
+ doFindTest(*finder, isc::dns::Name("cancel.here.wild.example.org"),
+ isc::dns::RRType::AAAA(), isc::dns::RRType::AAAA(),
+ this->rrttl_, ZoneFinder::SUCCESS,
+ this->expected_rdatas_, this->expected_sig_rdatas_);
+ this->expected_rdatas_.clear();
+
+ // How wildcard go together with delegation
+ this->expected_rdatas_.push_back("ns.example.com.");
+ doFindTest(*finder, isc::dns::Name("below.delegatedwild.example.org"),
+ this->qtype_, isc::dns::RRType::NS(), this->rrttl_,
+ ZoneFinder::DELEGATION, this->expected_rdatas_,
+ this->expected_sig_rdatas_,
+ isc::dns::Name("delegatedwild.example.org"));
+ // FIXME: This doesn't look logically OK, GLUE_OK should make it transparent,
+ // so the match should either work or be canceled, but return NXDOMAIN
+ doFindTest(*finder, isc::dns::Name("below.delegatedwild.example.org"),
+ this->qtype_, isc::dns::RRType::NS(), this->rrttl_,
+ ZoneFinder::DELEGATION, this->expected_rdatas_,
+ this->expected_sig_rdatas_,
+ isc::dns::Name("delegatedwild.example.org"),
+ ZoneFinder::FIND_GLUE_OK);
+
+ this->expected_rdatas_.clear();
+ this->expected_rdatas_.push_back("192.0.2.5");
+ // These are direct matches
+ const char* positive_names[] = {
+ "wild.*.foo.example.org.",
+ "wild.*.foo.*.bar.example.org.",
+ NULL
+ };
+ for (const char** name(positive_names); *name != NULL; ++ name) {
+ doFindTest(*finder, isc::dns::Name(*name), this->qtype_,
+ this->qtype_, this->rrttl_, ZoneFinder::SUCCESS,
+ this->expected_rdatas_,
+ this->expected_sig_rdatas_);
+ }
+
+ // These are wildcard matches against empty nonterminal asterisk
+ this->expected_rdatas_.clear();
+ const char* negative_names[] = {
+ "a.foo.example.org.",
+ "*.foo.example.org.",
+ "foo.example.org.",
+ "wild.bar.foo.example.org.",
+ "baz.foo.*.bar.example.org",
+ "baz.foo.baz.bar.example.org",
+ "*.foo.baz.bar.example.org",
+ "*.foo.*.bar.example.org",
+ "foo.*.bar.example.org",
+ "*.bar.example.org",
+ "bar.example.org",
+ NULL
+ };
+ for (const char** name(negative_names); *name != NULL; ++ name) {
+ doFindTest(*finder, isc::dns::Name(*name), this->qtype_,
+ this->qtype_, this->rrttl_, ZoneFinder::NXRRSET,
+ this->expected_rdatas_, this->expected_sig_rdatas_);
+ // FIXME: What should be returned in this case? How does the
+ // DNSSEC logic handle it?
+ }
+
+ const char* negative_dnssec_names[] = {
+ "a.bar.example.org.",
+ "foo.baz.bar.example.org.",
+ "a.foo.bar.example.org.",
+ NULL
+ };
+
+ this->expected_rdatas_.clear();
+ this->expected_rdatas_.push_back("wild.*.foo.*.bar.example.org. NSEC");
+ this->expected_sig_rdatas_.clear();
+ for (const char** name(negative_dnssec_names); *name != NULL; ++ name) {
+ doFindTest(*finder, isc::dns::Name(*name), this->qtype_,
+ RRType::NSEC(), this->rrttl_, ZoneFinder::WILDCARD_NXRRSET,
+ this->expected_rdatas_, this->expected_sig_rdatas_,
+ Name("bao.example.org."), ZoneFinder::FIND_DNSSEC);
+ }
+
+ // Some strange things in the wild node
+ this->expected_rdatas_.clear();
+ this->expected_rdatas_.push_back("www.example.org.");
+ this->expected_sig_rdatas_.clear();
+ doFindTest(*finder, isc::dns::Name("a.cnamewild.example.org."),
+ isc::dns::RRType::TXT(), isc::dns::RRType::CNAME(),
+ this->rrttl_, ZoneFinder::CNAME,
+ this->expected_rdatas_, this->expected_sig_rdatas_);
+
+ this->expected_rdatas_.clear();
+ this->expected_rdatas_.push_back("ns.example.com.");
+ doFindTest(*finder, isc::dns::Name("a.nswild.example.org."),
+ isc::dns::RRType::TXT(), isc::dns::RRType::NS(),
+ this->rrttl_, ZoneFinder::DELEGATION,
+ this->expected_rdatas_, this->expected_sig_rdatas_);
+}
+
+TYPED_TEST(DatabaseClientTest, NXRRSET_NSEC) {
+ // The domain exists, but doesn't have this RRType
+ // So we should get its NSEC
+ shared_ptr<DatabaseClient::Finder> finder(this->getFinder());
+
+ this->expected_rdatas_.push_back("www2.example.org. A AAAA NSEC RRSIG");
+ this->expected_sig_rdatas_.push_back("NSEC 5 3 3600 20000101000000 "
+ "20000201000000 12345 example.org. "
+ "FAKEFAKEFAKE");
+ doFindTest(*finder, isc::dns::Name("www.example.org."),
+ isc::dns::RRType::TXT(), isc::dns::RRType::NSEC(),
+ this->rrttl_, ZoneFinder::NXRRSET,
+ this->expected_rdatas_, this->expected_sig_rdatas_,
+ Name::ROOT_NAME(), ZoneFinder::FIND_DNSSEC);
+}
+
+TYPED_TEST(DatabaseClientTest, wildcardNXRRSET_NSEC) {
+ // The domain exists, but doesn't have this RRType
+ // So we should get its NSEC
+ //
+ // The user will have to query us again to get the correct
+ // answer (eg. prove there's not an exact match)
+ shared_ptr<DatabaseClient::Finder> finder(this->getFinder());
+
+ this->expected_rdatas_.push_back("cancel.here.wild.example.org. A NSEC "
+ "RRSIG");
+ this->expected_sig_rdatas_.push_back("NSEC 5 3 3600 20000101000000 "
+ "20000201000000 12345 example.org. "
+ "FAKEFAKEFAKE");
+ // Note that the NSEC name should NOT be synthesized.
+ doFindTest(*finder, isc::dns::Name("a.wild.example.org."),
+ isc::dns::RRType::TXT(), isc::dns::RRType::NSEC(),
+ this->rrttl_, ZoneFinder::WILDCARD_NXRRSET,
+ this->expected_rdatas_, this->expected_sig_rdatas_,
+ Name("*.wild.example.org"), ZoneFinder::FIND_DNSSEC);
+}
+
+TYPED_TEST(DatabaseClientTest, NXDOMAIN_NSEC) {
+ // The domain doesn't exist, so we must get the right NSEC
+ shared_ptr<DatabaseClient::Finder> finder(this->getFinder());
+
+ this->expected_rdatas_.push_back("www2.example.org. A AAAA NSEC RRSIG");
+ this->expected_sig_rdatas_.push_back("NSEC 5 3 3600 20000101000000 "
+ "20000201000000 12345 example.org. "
+ "FAKEFAKEFAKE");
+ doFindTest(*finder, isc::dns::Name("www1.example.org."),
+ isc::dns::RRType::TXT(), isc::dns::RRType::NSEC(),
+ this->rrttl_, ZoneFinder::NXDOMAIN,
+ this->expected_rdatas_, this->expected_sig_rdatas_,
+ Name("www.example.org."), ZoneFinder::FIND_DNSSEC);
+ this->expected_rdatas_.clear();
+ this->expected_rdatas_.push_back("acnamesig1.example.org. NS A NSEC RRSIG");
+ // This tests it works correctly in apex (there was a bug, where a check
+ // for NS-alone was there and it would throw).
+ doFindTest(*finder, isc::dns::Name("aa.example.org."),
+ isc::dns::RRType::TXT(), isc::dns::RRType::NSEC(),
+ this->rrttl_, ZoneFinder::NXDOMAIN,
+ this->expected_rdatas_, this->expected_sig_rdatas_,
+ Name("example.org."), ZoneFinder::FIND_DNSSEC);
+
+ // Check that if the DB doesn't support it, the exception from there
+ // is not propagated and it only does not include the NSEC
+ if (!this->is_mock_) {
+ return; // We don't make the real DB to throw
+ }
+ EXPECT_NO_THROW(doFindTest(*finder,
+ isc::dns::Name("notimplnsec.example.org."),
+ isc::dns::RRType::TXT(),
+ isc::dns::RRType::NSEC(), this->rrttl_,
+ ZoneFinder::NXDOMAIN, this->empty_rdatas_,
+ this->empty_rdatas_, Name::ROOT_NAME(),
+ ZoneFinder::FIND_DNSSEC));
+}
+
+TYPED_TEST(DatabaseClientTest, emptyNonterminalNSEC) {
+ // Same as NXDOMAIN_NSEC, but with empty non-terminal
+ shared_ptr<DatabaseClient::Finder> finder(this->getFinder());
+
+ this->expected_rdatas_.push_back("empty.nonterminal.example.org. NSEC");
+ doFindTest(*finder, isc::dns::Name("nonterminal.example.org."),
+ isc::dns::RRType::TXT(), isc::dns::RRType::NSEC(), this->rrttl_,
+ ZoneFinder::NXRRSET,
+ this->expected_rdatas_, this->expected_sig_rdatas_,
+ Name("l.example.org."), ZoneFinder::FIND_DNSSEC);
+
+ // Check that if the DB doesn't support it, the exception from there
+ // is not propagated and it only does not include the NSEC
+ if (!this->is_mock_) {
+ return; // We don't make the real DB to throw
+ }
+ EXPECT_NO_THROW(doFindTest(*finder,
+ isc::dns::Name("here.wild.example.org."),
+ isc::dns::RRType::TXT(),
+ isc::dns::RRType::NSEC(),
+ this->rrttl_, ZoneFinder::NXRRSET,
+ this->empty_rdatas_, this->empty_rdatas_,
+ Name::ROOT_NAME(), ZoneFinder::FIND_DNSSEC));
+}
+
+TYPED_TEST(DatabaseClientTest, getOrigin) {
+ DataSourceClient::FindResult
+ zone(this->client_->findZone(Name("example.org")));
+ ASSERT_EQ(result::SUCCESS, zone.code);
+ shared_ptr<DatabaseClient::Finder> finder(
+ dynamic_pointer_cast<DatabaseClient::Finder>(zone.zone_finder));
+ if (this->is_mock_) {
+ EXPECT_EQ(READONLY_ZONE_ID, finder->zone_id());
+ }
+ EXPECT_EQ(this->zname_, finder->getOrigin());
+}
+
+TYPED_TEST(DatabaseClientTest, updaterFinder) {
+ this->updater_ = this->client_->getUpdater(this->zname_, false);
+ ASSERT_TRUE(this->updater_);
+
+ // If this update isn't replacing the zone, the finder should work
+ // just like the normal find() case.
+ if (this->is_mock_) {
+ DatabaseClient::Finder& finder = dynamic_cast<DatabaseClient::Finder&>(
+ this->updater_->getFinder());
+ EXPECT_EQ(WRITABLE_ZONE_ID, finder.zone_id());
+ }
+ this->expected_rdatas_.clear();
+ this->expected_rdatas_.push_back("192.0.2.1");
+ doFindTest(this->updater_->getFinder(), this->qname_,
+ this->qtype_, this->qtype_, this->rrttl_, ZoneFinder::SUCCESS,
+ this->expected_rdatas_, this->empty_rdatas_);
+
+ // When replacing the zone, the updater's finder shouldn't see anything
+ // in the zone until something is added.
+ this->updater_.reset();
+ this->updater_ = this->client_->getUpdater(this->zname_, true);
+ ASSERT_TRUE(this->updater_);
+ if (this->is_mock_) {
+ DatabaseClient::Finder& finder = dynamic_cast<DatabaseClient::Finder&>(
+ this->updater_->getFinder());
+ EXPECT_EQ(WRITABLE_ZONE_ID, finder.zone_id());
+ }
+ doFindTest(this->updater_->getFinder(), this->qname_, this->qtype_,
+ this->qtype_, this->rrttl_, ZoneFinder::NXDOMAIN,
+ this->empty_rdatas_, this->empty_rdatas_);
+}
+
+TYPED_TEST(DatabaseClientTest, flushZone) {
+ // A simple update case: flush the entire zone
+ shared_ptr<DatabaseClient::Finder> finder(this->getFinder());
+
+ // Before update, the name exists.
+ EXPECT_EQ(ZoneFinder::SUCCESS, finder->find(this->qname_,
+ this->qtype_).code);
+
+ // start update in the replace mode. the normal finder should still
+ // be able to see the record, but the updater's finder shouldn't.
+ this->updater_ = this->client_->getUpdater(this->zname_, true);
+ this->setUpdateAccessor();
+ EXPECT_EQ(ZoneFinder::SUCCESS,
+ finder->find(this->qname_, this->qtype_).code);
+ EXPECT_EQ(ZoneFinder::NXDOMAIN,
+ this->updater_->getFinder().find(this->qname_,
+ this->qtype_).code);
+
+ // commit the update. now the normal finder shouldn't see it.
+ this->updater_->commit();
+ EXPECT_EQ(ZoneFinder::NXDOMAIN, finder->find(this->qname_,
+ this->qtype_).code);
+
+ // Check rollback wasn't accidentally performed.
+ EXPECT_FALSE(this->isRollbacked());
+}
+
+TYPED_TEST(DatabaseClientTest, updateCancel) {
+ // similar to the previous test, but destruct the updater before commit.
+
+ ZoneFinderPtr finder = this->client_->findZone(this->zname_).zone_finder;
+ EXPECT_EQ(ZoneFinder::SUCCESS, finder->find(this->qname_,
+ this->qtype_).code);
+
+ this->updater_ = this->client_->getUpdater(this->zname_, true);
+ this->setUpdateAccessor();
+ EXPECT_EQ(ZoneFinder::NXDOMAIN,
+ this->updater_->getFinder().find(this->qname_,
+ this->qtype_).code);
+ // DB should not have been rolled back yet.
+ EXPECT_FALSE(this->isRollbacked());
+ this->updater_.reset(); // destruct without commit
+
+ // reset() should have triggered rollback (although it doesn't affect
+ // anything to the mock accessor implementation except for the result of
+ // isRollbacked())
+ EXPECT_TRUE(this->isRollbacked(true));
+ EXPECT_EQ(ZoneFinder::SUCCESS, finder->find(this->qname_,
+ this->qtype_).code);
+}
+
+TYPED_TEST(DatabaseClientTest, exceptionFromRollback) {
+ this->updater_ = this->client_->getUpdater(this->zname_, true);
+
+ this->rrset_.reset(new RRset(Name("throw.example.org"), this->qclass_,
+ this->qtype_, this->rrttl_));
+ this->rrset_->addRdata(rdata::createRdata(this->rrset_->getType(),
+ this->rrset_->getClass(),
+ "192.0.2.1"));
+ this->updater_->addRRset(*this->rrset_);
+ // destruct without commit. The added name will result in an exception
+ // in the MockAccessor's rollback method. It shouldn't be propagated.
+ EXPECT_NO_THROW(this->updater_.reset());
+}
+
+TYPED_TEST(DatabaseClientTest, duplicateCommit) {
+ // duplicate commit. should result in exception.
+ this->updater_ = this->client_->getUpdater(this->zname_, true);
+ this->updater_->commit();
+ EXPECT_THROW(this->updater_->commit(), DataSourceError);
+}
+
+TYPED_TEST(DatabaseClientTest, addRRsetToNewZone) {
+ // Add a single RRset to a fresh empty zone
+ this->updater_ = this->client_->getUpdater(this->zname_, true);
+ this->updater_->addRRset(*this->rrset_);
+
+ this->expected_rdatas_.clear();
+ this->expected_rdatas_.push_back("192.0.2.2");
+ {
+ SCOPED_TRACE("add RRset");
+ doFindTest(this->updater_->getFinder(), this->qname_, this->qtype_,
+ this->qtype_, this->rrttl_, ZoneFinder::SUCCESS,
+ this->expected_rdatas_, this->empty_rdatas_);
+ }
+
+ // Similar to the previous case, but with RRSIG
+ this->updater_.reset();
+ this->updater_ = this->client_->getUpdater(this->zname_, true);
+ this->updater_->addRRset(*this->rrset_);
+ this->updater_->addRRset(*this->rrsigset_);
+
+ // confirm the expected columns were passed to the accessor (if checkable).
+ const char* const rrsig_added[] = {
+ "www.example.org.", "org.example.www.", "3600", "RRSIG", "A",
+ "A 5 3 0 20000101000000 20000201000000 0 example.org. FAKEFAKEFAKE"
+ };
+ this->checkLastAdded(rrsig_added);
+
+ this->expected_sig_rdatas_.clear();
+ this->expected_sig_rdatas_.push_back(
+ rrsig_added[DatabaseAccessor::ADD_RDATA]);
+ {
+ SCOPED_TRACE("add RRset with RRSIG");
+ doFindTest(this->updater_->getFinder(), this->qname_, this->qtype_,
+ this->qtype_, this->rrttl_, ZoneFinder::SUCCESS,
+ this->expected_rdatas_, this->expected_sig_rdatas_);
+ }
+
+ // Add the non RRSIG RRset again, to see the attempt of adding RRSIG
+ // causes any unexpected effect, in particular, whether the SIGTYPE
+ // field might remain.
+ this->updater_->addRRset(*this->rrset_);
+ const char* const rrset_added[] = {
+ "www.example.org.", "org.example.www.", "3600", "A", "", "192.0.2.2"
+ };
+ this->checkLastAdded(rrset_added);
+}
+
+TYPED_TEST(DatabaseClientTest, addRRsetToCurrentZone) {
+ // Similar to the previous test, but not replacing the existing data.
+ shared_ptr<DatabaseClient::Finder> finder(this->getFinder());
+
+ this->updater_ = this->client_->getUpdater(this->zname_, false);
+ this->updater_->addRRset(*this->rrset_);
+
+ // We should see both old and new data.
+ this->expected_rdatas_.clear();
+ this->expected_rdatas_.push_back("192.0.2.1");
+ this->expected_rdatas_.push_back("192.0.2.2");
+ {
+ SCOPED_TRACE("add RRset");
+ doFindTest(this->updater_->getFinder(), this->qname_, this->qtype_,
+ this->qtype_, this->rrttl_, ZoneFinder::SUCCESS,
+ this->expected_rdatas_, this->empty_rdatas_);
+ }
+ this->updater_->commit();
+ {
+ SCOPED_TRACE("add RRset after commit");
+ doFindTest(*finder, this->qname_, this->qtype_, this->qtype_,
+ this->rrttl_, ZoneFinder::SUCCESS, this->expected_rdatas_,
+ this->empty_rdatas_);
+ }
+}
+
+TYPED_TEST(DatabaseClientTest, addMultipleRRs) {
+ // Similar to the previous case, but the added RRset contains multiple
+ // RRs.
+ this->updater_ = this->client_->getUpdater(this->zname_, false);
+ this->rrset_->addRdata(rdata::createRdata(this->rrset_->getType(),
+ this->rrset_->getClass(),
+ "192.0.2.3"));
+ this->updater_->addRRset(*this->rrset_);
+ this->expected_rdatas_.clear();
+ this->expected_rdatas_.push_back("192.0.2.1");
+ this->expected_rdatas_.push_back("192.0.2.2");
+ this->expected_rdatas_.push_back("192.0.2.3");
+ {
+ SCOPED_TRACE("add multiple RRs");
+ doFindTest(this->updater_->getFinder(), this->qname_, this->qtype_,
+ this->qtype_, this->rrttl_, ZoneFinder::SUCCESS,
+ this->expected_rdatas_, this->empty_rdatas_);
+ }
+}
+
+TYPED_TEST(DatabaseClientTest, addRRsetOfLargerTTL) {
+ // Similar to the previous one, but the TTL of the added RRset is larger
+ // than that of the existing record. The finder should use the smaller
+ // one.
+ this->updater_ = this->client_->getUpdater(this->zname_, false);
+ this->rrset_->setTTL(RRTTL(7200));
+ this->updater_->addRRset(*this->rrset_);
+
+ this->expected_rdatas_.clear();
+ this->expected_rdatas_.push_back("192.0.2.1");
+ this->expected_rdatas_.push_back("192.0.2.2");
+ {
+ SCOPED_TRACE("add RRset of larger TTL");
+ doFindTest(this->updater_->getFinder(), this->qname_, this->qtype_,
+ this->qtype_, this->rrttl_, ZoneFinder::SUCCESS,
+ this->expected_rdatas_, this->empty_rdatas_);
+ }
+}
+
+TYPED_TEST(DatabaseClientTest, addRRsetOfSmallerTTL) {
+ // Similar to the previous one, but the added RRset has a smaller TTL.
+ // The added TTL should be used by the finder.
+ this->updater_ = this->client_->getUpdater(this->zname_, false);
+ this->rrset_->setTTL(RRTTL(1800));
+ this->updater_->addRRset(*this->rrset_);
+
+ this->expected_rdatas_.clear();
+ this->expected_rdatas_.push_back("192.0.2.1");
+ this->expected_rdatas_.push_back("192.0.2.2");
+ {
+ SCOPED_TRACE("add RRset of smaller TTL");
+ doFindTest(this->updater_->getFinder(), this->qname_, this->qtype_,
+ this->qtype_, RRTTL(1800), ZoneFinder::SUCCESS,
+ this->expected_rdatas_, this->empty_rdatas_);
+ }
+}
+
+TYPED_TEST(DatabaseClientTest, addSameRR) {
+ // Add the same RR as that is already in the data source.
+ // Currently the add interface doesn't try to suppress the duplicate,
+ // neither does the finder. We may want to revisit it in future versions.
+
+ this->updater_ = this->client_->getUpdater(this->zname_, false);
+ this->rrset_.reset(new RRset(this->qname_, this->qclass_, this->qtype_,
+ this->rrttl_));
+ this->rrset_->addRdata(rdata::createRdata(this->rrset_->getType(),
+ this->rrset_->getClass(),
+ "192.0.2.1"));
+ this->updater_->addRRset(*this->rrset_);
+ this->expected_rdatas_.clear();
+ this->expected_rdatas_.push_back("192.0.2.1");
+ this->expected_rdatas_.push_back("192.0.2.1");
+ {
+ SCOPED_TRACE("add same RR");
+ doFindTest(this->updater_->getFinder(), this->qname_, this->qtype_,
+ this->qtype_, this->rrttl_, ZoneFinder::SUCCESS,
+ this->expected_rdatas_, this->empty_rdatas_);
+ }
+}
+
+TYPED_TEST(DatabaseClientTest, addDeviantRR) {
+ this->updater_ = this->client_->getUpdater(this->zname_, false);
+
+ // RR class mismatch. This should be detected and rejected.
+ this->rrset_.reset(new RRset(this->qname_, RRClass::CH(), RRType::TXT(),
+ this->rrttl_));
+ this->rrset_->addRdata(rdata::createRdata(this->rrset_->getType(),
+ this->rrset_->getClass(),
+ "test text"));
+ EXPECT_THROW(this->updater_->addRRset(*this->rrset_), DataSourceError);
+
+ // Out-of-zone owner name. At a higher level this should be rejected,
+ // but it doesn't happen in this interface.
+ this->rrset_.reset(new RRset(Name("example.com"), this->qclass_,
+ this->qtype_, this->rrttl_));
+ this->rrset_->addRdata(rdata::createRdata(this->rrset_->getType(),
+ this->rrset_->getClass(),
+ "192.0.2.100"));
+ this->updater_->addRRset(*this->rrset_);
+
+ this->expected_rdatas_.clear();
+ this->expected_rdatas_.push_back("192.0.2.100");
+ {
+ // Note: with the find() implementation being more strict about
+ // zone cuts, this test may fail. Then the test should be updated.
+ SCOPED_TRACE("add out-of-zone RR");
+ doFindTest(this->updater_->getFinder(), Name("example.com"),
+ this->qtype_, this->qtype_, this->rrttl_,
+ ZoneFinder::SUCCESS, this->expected_rdatas_,
+ this->empty_rdatas_);
+ }
+}
+
+TYPED_TEST(DatabaseClientTest, addEmptyRRset) {
+ this->updater_ = this->client_->getUpdater(this->zname_, false);
+ this->rrset_.reset(new RRset(this->qname_, this->qclass_, this->qtype_,
+ this->rrttl_));
+ EXPECT_THROW(this->updater_->addRRset(*this->rrset_), DataSourceError);
+}
+
+TYPED_TEST(DatabaseClientTest, addAfterCommit) {
+ this->updater_ = this->client_->getUpdater(this->zname_, false);
+ this->updater_->commit();
+ EXPECT_THROW(this->updater_->addRRset(*this->rrset_), DataSourceError);
+}
+
+TYPED_TEST(DatabaseClientTest, addRRsetWithRRSIG) {
+ this->updater_ = this->client_->getUpdater(this->zname_, false);
+ this->rrset_->addRRsig(*this->rrsigset_);
+ EXPECT_THROW(this->updater_->addRRset(*this->rrset_), DataSourceError);
+}
+
+TYPED_TEST(DatabaseClientTest, deleteRRset) {
+ shared_ptr<DatabaseClient::Finder> finder(this->getFinder());
+
+ this->rrset_.reset(new RRset(this->qname_, this->qclass_, this->qtype_,
+ this->rrttl_));
+ this->rrset_->addRdata(rdata::createRdata(this->rrset_->getType(),
+ this->rrset_->getClass(),
+ "192.0.2.1"));
+
+ // Delete one RR from an RRset
+ this->updater_ = this->client_->getUpdater(this->zname_, false);
+ this->updater_->deleteRRset(*this->rrset_);
+
+ // Delete the only RR of a name
+ this->rrset_.reset(new RRset(Name("cname.example.org"), this->qclass_,
+ RRType::CNAME(), this->rrttl_));
+ this->rrset_->addRdata(rdata::createRdata(this->rrset_->getType(),
+ this->rrset_->getClass(),
+ "www.example.org"));
+ this->updater_->deleteRRset(*this->rrset_);
+
+ // The this->updater_ finder should immediately see the deleted results.
+ {
+ SCOPED_TRACE("delete RRset");
+ doFindTest(this->updater_->getFinder(), this->qname_, this->qtype_,
+ this->qtype_, this->rrttl_, ZoneFinder::NXRRSET,
+ this->empty_rdatas_, this->empty_rdatas_);
+ doFindTest(this->updater_->getFinder(), Name("cname.example.org"),
+ this->qtype_, this->qtype_, this->rrttl_,
+ ZoneFinder::NXDOMAIN, this->empty_rdatas_,
+ this->empty_rdatas_);
+ }
+
+ // before committing the change, the original finder should see the
+ // original record.
+ {
+ SCOPED_TRACE("delete RRset before commit");
+ this->expected_rdatas_.push_back("192.0.2.1");
+ doFindTest(*finder, this->qname_, this->qtype_, this->qtype_,
+ this->rrttl_, ZoneFinder::SUCCESS, this->expected_rdatas_,
+ this->empty_rdatas_);
+
+ this->expected_rdatas_.clear();
+ this->expected_rdatas_.push_back("www.example.org.");
+ doFindTest(*finder, Name("cname.example.org"), this->qtype_,
+ RRType::CNAME(), this->rrttl_, ZoneFinder::CNAME,
+ this->expected_rdatas_, this->empty_rdatas_);
+ }
+
+ // once committed, the record should be removed from the original finder's
+ // view, too.
+ this->updater_->commit();
+ {
+ SCOPED_TRACE("delete RRset after commit");
+ doFindTest(*finder, this->qname_, this->qtype_, this->qtype_,
+ this->rrttl_, ZoneFinder::NXRRSET, this->empty_rdatas_,
+ this->empty_rdatas_);
+ doFindTest(*finder, Name("cname.example.org"), this->qtype_,
+ this->qtype_, this->rrttl_, ZoneFinder::NXDOMAIN,
+ this->empty_rdatas_, this->empty_rdatas_);
+ }
+}
+
+TYPED_TEST(DatabaseClientTest, deleteRRsetToNXDOMAIN) {
+ // similar to the previous case, but it removes the only record of the
+ // given name. a subsequent find() should result in NXDOMAIN.
+ this->rrset_.reset(new RRset(Name("cname.example.org"), this->qclass_,
+ RRType::CNAME(), this->rrttl_));
+ this->rrset_->addRdata(rdata::createRdata(this->rrset_->getType(),
+ this->rrset_->getClass(),
+ "www.example.org"));
+
+ this->updater_ = this->client_->getUpdater(this->zname_, false);
+ this->updater_->deleteRRset(*this->rrset_);
+ {
+ SCOPED_TRACE("delete RRset to NXDOMAIN");
+ doFindTest(this->updater_->getFinder(), Name("cname.example.org"),
+ this->qtype_, this->qtype_, this->rrttl_,
+ ZoneFinder::NXDOMAIN, this->empty_rdatas_,
+ this->empty_rdatas_);
+ }
+}
+
+TYPED_TEST(DatabaseClientTest, deleteMultipleRRs) {
+ this->rrset_.reset(new RRset(this->qname_, this->qclass_, RRType::AAAA(),
+ this->rrttl_));
+ this->rrset_->addRdata(rdata::createRdata(this->rrset_->getType(),
+ this->rrset_->getClass(),
+ "2001:db8::1"));
+ this->rrset_->addRdata(rdata::createRdata(this->rrset_->getType(),
+ this->rrset_->getClass(),
+ "2001:db8::2"));
+
+ this->updater_ = this->client_->getUpdater(this->zname_, false);
+ this->updater_->deleteRRset(*this->rrset_);
+
+ {
+ SCOPED_TRACE("delete multiple RRs");
+ doFindTest(this->updater_->getFinder(), this->qname_, RRType::AAAA(),
+ this->qtype_, this->rrttl_, ZoneFinder::NXRRSET,
+ this->empty_rdatas_, this->empty_rdatas_);
+ }
+}
+
+TYPED_TEST(DatabaseClientTest, partialDelete) {
+ this->rrset_.reset(new RRset(this->qname_, this->qclass_, RRType::AAAA(),
+ this->rrttl_));
+ this->rrset_->addRdata(rdata::createRdata(this->rrset_->getType(),
+ this->rrset_->getClass(),
+ "2001:db8::1"));
+ // This does not exist in the test data source:
+ this->rrset_->addRdata(rdata::createRdata(this->rrset_->getType(),
+ this->rrset_->getClass(),
+ "2001:db8::3"));
+
+ // deleteRRset should succeed "silently", and subsequent find() should
+ // find the remaining RR.
+ this->updater_ = this->client_->getUpdater(this->zname_, false);
+ this->updater_->deleteRRset(*this->rrset_);
+ {
+ SCOPED_TRACE("partial delete");
+ this->expected_rdatas_.push_back("2001:db8::2");
+ doFindTest(this->updater_->getFinder(), this->qname_, RRType::AAAA(),
+ RRType::AAAA(), this->rrttl_, ZoneFinder::SUCCESS,
+ this->expected_rdatas_, this->empty_rdatas_);
+ }
+}
+
+TYPED_TEST(DatabaseClientTest, deleteNoMatch) {
+ // similar to the previous test, but there's not even a match in the
+ // specified RRset. Essentially there's no difference in the result.
+ this->updater_ = this->client_->getUpdater(this->zname_, false);
+ this->updater_->deleteRRset(*this->rrset_);
+ {
+ SCOPED_TRACE("delete no match");
+ this->expected_rdatas_.push_back("192.0.2.1");
+ doFindTest(this->updater_->getFinder(), this->qname_, this->qtype_,
+ this->qtype_, this->rrttl_, ZoneFinder::SUCCESS,
+ this->expected_rdatas_, this->empty_rdatas_);
+ }
+}
+
+TYPED_TEST(DatabaseClientTest, deleteWithDifferentTTL) {
+ // Our delete interface simply ignores TTL (may change in a future version)
+ this->rrset_.reset(new RRset(this->qname_, this->qclass_, this->qtype_,
+ RRTTL(1800)));
+ this->rrset_->addRdata(rdata::createRdata(this->rrset_->getType(),
+ this->rrset_->getClass(),
+ "192.0.2.1"));
+ this->updater_ = this->client_->getUpdater(this->zname_, false);
+ this->updater_->deleteRRset(*this->rrset_);
+ {
+ SCOPED_TRACE("delete RRset with a different TTL");
+ doFindTest(this->updater_->getFinder(), this->qname_, this->qtype_,
+ this->qtype_, this->rrttl_, ZoneFinder::NXRRSET,
+ this->empty_rdatas_, this->empty_rdatas_);
+ }
+}
+
+TYPED_TEST(DatabaseClientTest, deleteDeviantRR) {
+ this->updater_ = this->client_->getUpdater(this->zname_, false);
+
+ // RR class mismatch. This should be detected and rejected.
+ this->rrset_.reset(new RRset(this->qname_, RRClass::CH(), RRType::TXT(),
+ this->rrttl_));
+ this->rrset_->addRdata(rdata::createRdata(this->rrset_->getType(),
+ this->rrset_->getClass(),
+ "test text"));
+ EXPECT_THROW(this->updater_->deleteRRset(*this->rrset_), DataSourceError);
+
+ // Out-of-zone owner name. At a higher level this should be rejected,
+ // but it doesn't happen in this interface.
+ this->rrset_.reset(new RRset(Name("example.com"), this->qclass_,
+ this->qtype_, this->rrttl_));
+ this->rrset_->addRdata(rdata::createRdata(this->rrset_->getType(),
+ this->rrset_->getClass(),
+ "192.0.2.100"));
+ EXPECT_NO_THROW(this->updater_->deleteRRset(*this->rrset_));
+}
+
+TYPED_TEST(DatabaseClientTest, deleteAfterCommit) {
+ this->updater_ = this->client_->getUpdater(this->zname_, false);
+ this->updater_->commit();
+ EXPECT_THROW(this->updater_->deleteRRset(*this->rrset_), DataSourceError);
+}
+
+TYPED_TEST(DatabaseClientTest, deleteEmptyRRset) {
+ this->updater_ = this->client_->getUpdater(this->zname_, false);
+ this->rrset_.reset(new RRset(this->qname_, this->qclass_, this->qtype_,
+ this->rrttl_));
+ EXPECT_THROW(this->updater_->deleteRRset(*this->rrset_), DataSourceError);
+}
+
+TYPED_TEST(DatabaseClientTest, deleteRRsetWithRRSIG) {
+ this->updater_ = this->client_->getUpdater(this->zname_, false);
+ this->rrset_->addRRsig(*this->rrsigset_);
+ EXPECT_THROW(this->updater_->deleteRRset(*this->rrset_), DataSourceError);
+}
+
+TYPED_TEST(DatabaseClientTest, compoundUpdate) {
+ // This test case performs an arbitrary chosen add/delete operations
+ // in a single update transaction. Essentially there is nothing new to
+ // test here, but there may be some bugs that was overlooked and can
+ // only happen in the compound update scenario, so we test it.
+
+ this->updater_ = this->client_->getUpdater(this->zname_, false);
+
+ // add a new RR to an existing RRset
+ this->updater_->addRRset(*this->rrset_);
+ this->expected_rdatas_.clear();
+ this->expected_rdatas_.push_back("192.0.2.1");
+ this->expected_rdatas_.push_back("192.0.2.2");
+ doFindTest(this->updater_->getFinder(), this->qname_, this->qtype_,
+ this->qtype_, this->rrttl_, ZoneFinder::SUCCESS,
+ this->expected_rdatas_, this->empty_rdatas_);
+
+ // delete an existing RR
+ this->rrset_.reset(new RRset(Name("www.example.org"), this->qclass_,
+ this->qtype_, this->rrttl_));
+ this->rrset_->addRdata(rdata::createRdata(this->rrset_->getType(),
+ this->rrset_->getClass(),
+ "192.0.2.1"));
+ this->updater_->deleteRRset(*this->rrset_);
+ this->expected_rdatas_.clear();
+ this->expected_rdatas_.push_back("192.0.2.2");
+ doFindTest(this->updater_->getFinder(), this->qname_, this->qtype_,
+ this->qtype_, this->rrttl_, ZoneFinder::SUCCESS,
+ this->expected_rdatas_, this->empty_rdatas_);
+
+ // re-add it
+ this->updater_->addRRset(*this->rrset_);
+ this->expected_rdatas_.push_back("192.0.2.1");
+ doFindTest(this->updater_->getFinder(), this->qname_, this->qtype_,
+ this->qtype_, this->rrttl_, ZoneFinder::SUCCESS,
+ this->expected_rdatas_, this->empty_rdatas_);
+
+ // add a new RR with a new name
+ const Name newname("newname.example.org");
+ const RRType newtype(RRType::AAAA());
+ doFindTest(this->updater_->getFinder(), newname, newtype, newtype,
+ this->rrttl_, ZoneFinder::NXDOMAIN, this->empty_rdatas_,
+ this->empty_rdatas_);
+ this->rrset_.reset(new RRset(newname, this->qclass_, newtype,
+ this->rrttl_));
+ this->rrset_->addRdata(rdata::createRdata(this->rrset_->getType(),
+ this->rrset_->getClass(),
+ "2001:db8::10"));
+ this->rrset_->addRdata(rdata::createRdata(this->rrset_->getType(),
+ this->rrset_->getClass(),
+ "2001:db8::11"));
+ this->updater_->addRRset(*this->rrset_);
+ this->expected_rdatas_.clear();
+ this->expected_rdatas_.push_back("2001:db8::10");
+ this->expected_rdatas_.push_back("2001:db8::11");
+ doFindTest(this->updater_->getFinder(), newname, newtype, newtype,
+ this->rrttl_, ZoneFinder::SUCCESS, this->expected_rdatas_,
+ this->empty_rdatas_);
+
+ // delete one RR from the previous set
+ this->rrset_.reset(new RRset(newname, this->qclass_, newtype,
+ this->rrttl_));
+ this->rrset_->addRdata(rdata::createRdata(this->rrset_->getType(),
+ this->rrset_->getClass(),
+ "2001:db8::11"));
+ this->updater_->deleteRRset(*this->rrset_);
+ this->expected_rdatas_.clear();
+ this->expected_rdatas_.push_back("2001:db8::10");
+ doFindTest(this->updater_->getFinder(), newname, newtype, newtype,
+ this->rrttl_, ZoneFinder::SUCCESS, this->expected_rdatas_,
+ this->empty_rdatas_);
+
+ // Commit the changes, confirm the entire changes applied.
+ this->updater_->commit();
+ shared_ptr<DatabaseClient::Finder> finder(this->getFinder());
+ this->expected_rdatas_.clear();
+ this->expected_rdatas_.push_back("192.0.2.2");
+ this->expected_rdatas_.push_back("192.0.2.1");
+ doFindTest(*finder, this->qname_, this->qtype_, this->qtype_, this->rrttl_,
+ ZoneFinder::SUCCESS, this->expected_rdatas_,
+ this->empty_rdatas_);
+
+ this->expected_rdatas_.clear();
+ this->expected_rdatas_.push_back("2001:db8::10");
+ doFindTest(*finder, newname, newtype, newtype, this->rrttl_,
+ ZoneFinder::SUCCESS, this->expected_rdatas_,
+ this->empty_rdatas_);
+}
+
+TYPED_TEST(DatabaseClientTest, previous) {
+ shared_ptr<DatabaseClient::Finder> finder(this->getFinder());
+
+ EXPECT_EQ(Name("www.example.org."),
+ finder->findPreviousName(Name("www2.example.org.")));
+ // Check a name that doesn't exist there
+ EXPECT_EQ(Name("www.example.org."),
+ finder->findPreviousName(Name("www1.example.org.")));
+ if (this->is_mock_) { // We can't really force the DB to throw
+ // Check it doesn't crash or anything if the underlying DB throws
+ DataSourceClient::FindResult
+ zone(this->client_->findZone(Name("bad.example.org")));
+ finder =
+ dynamic_pointer_cast<DatabaseClient::Finder>(zone.zone_finder);
+
+ EXPECT_THROW(finder->findPreviousName(Name("bad.example.org")),
+ isc::NotImplemented);
+ } else {
+ // No need to test this on mock one, because we test only that
+ // the exception gets through
+
+ // A name before the origin
+ EXPECT_THROW(finder->findPreviousName(Name("example.com")),
+ isc::NotImplemented);
+ }
+}
+
+TYPED_TEST(DatabaseClientTest, invalidRdata) {
+ shared_ptr<DatabaseClient::Finder> finder(this->getFinder());
+
+ EXPECT_THROW(finder->find(Name("invalidrdata.example.org."), RRType::A()),
DataSourceError);
- EXPECT_FALSE(current_database_->searchRunning());
- EXPECT_THROW(finder->find(isc::dns::Name("iscexception.in.getnext."),
- isc::dns::RRType::A(),
- NULL, ZoneFinder::FIND_DEFAULT),
+ EXPECT_THROW(finder->find(Name("invalidrdata2.example.org."), RRType::A()),
DataSourceError);
- EXPECT_FALSE(current_database_->searchRunning());
- EXPECT_THROW(finder->find(isc::dns::Name("basicexception.in.getnext."),
- isc::dns::RRType::A(),
- NULL, ZoneFinder::FIND_DEFAULT),
- std::exception);
- EXPECT_FALSE(current_database_->searchRunning());
+}
- // This RRSIG has the wrong sigtype field, which should be
- // an error if we decide to keep using that field
- // Right now the field is ignored, so it does not error
- expected_rdatas.clear();
- expected_sig_rdatas.clear();
- expected_rdatas.push_back("192.0.2.1");
- expected_sig_rdatas.push_back("A 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE");
- doFindTest(finder, isc::dns::Name("badsigtype.example.org."),
- isc::dns::RRType::A(), isc::dns::RRType::A(),
- isc::dns::RRTTL(3600),
- ZoneFinder::SUCCESS,
- expected_rdatas, expected_sig_rdatas);
- EXPECT_FALSE(current_database_->searchRunning());
+TEST_F(MockDatabaseClientTest, missingNSEC) {
+ shared_ptr<DatabaseClient::Finder> finder(this->getFinder());
+
+ /*
+ * FIXME: For now, we can't really distinguish this bogus input
+ * from not-signed zone so we can't throw. But once we can,
+ * enable the original test.
+ */
+#if 0
+ EXPECT_THROW(finder->find(Name("badnsec2.example.org."), RRType::A(), NULL,
+ ZoneFinder::FIND_DNSSEC),
+ DataSourceError);
+#endif
+ doFindTest(*finder, Name("badnsec2.example.org."), RRType::A(),
+ RRType::A(), this->rrttl_, ZoneFinder::NXDOMAIN,
+ this->expected_rdatas_, this->expected_sig_rdatas_);
+}
+
+TEST_F(MockDatabaseClientTest, badName) {
+ shared_ptr<DatabaseClient::Finder> finder(this->getFinder());
+
+ EXPECT_THROW(finder->findPreviousName(Name("brokenname.example.org.")),
+ DataSourceError);
}
}
diff --git a/src/lib/datasrc/tests/factory_unittest.cc b/src/lib/datasrc/tests/factory_unittest.cc
new file mode 100644
index 0000000..94d1118
--- /dev/null
+++ b/src/lib/datasrc/tests/factory_unittest.cc
@@ -0,0 +1,175 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <boost/scoped_ptr.hpp>
+
+#include <datasrc/factory.h>
+#include <datasrc/data_source.h>
+#include <datasrc/sqlite3_accessor.h>
+
+#include <dns/rrclass.h>
+#include <cc/data.h>
+
+#include <gtest/gtest.h>
+
+using namespace isc::datasrc;
+using namespace isc::data;
+
+std::string SQLITE_DBFILE_EXAMPLE_ORG = TEST_DATA_DIR "/example.org.sqlite3";
+
+namespace {
+
+TEST(FactoryTest, sqlite3ClientBadConfig) {
+ // We start out by building the configuration data bit by bit,
+ // testing each form of 'bad config', until we have a good one.
+ // Then we do some very basic operation on the client (detailed
+ // tests are left to the implementation-specific backends)
+ ElementPtr config;
+ ASSERT_THROW(DataSourceClientContainer("sqlite3", config),
+ DataSourceConfigError);
+
+ config = Element::create("asdf");
+ ASSERT_THROW(DataSourceClientContainer("sqlite3", config),
+ DataSourceConfigError);
+
+ config = Element::createMap();
+ ASSERT_THROW(DataSourceClientContainer("sqlite3", config),
+ DataSourceConfigError);
+
+ config->set("class", ElementPtr());
+ ASSERT_THROW(DataSourceClientContainer("sqlite3", config),
+ DataSourceConfigError);
+
+ config->set("class", Element::create(1));
+ ASSERT_THROW(DataSourceClientContainer("sqlite3", config),
+ DataSourceConfigError);
+
+ config->set("class", Element::create("FOO"));
+ ASSERT_THROW(DataSourceClientContainer("sqlite3", config),
+ DataSourceConfigError);
+
+ config->set("class", Element::create("IN"));
+ ASSERT_THROW(DataSourceClientContainer("sqlite3", config),
+ DataSourceConfigError);
+
+ config->set("database_file", ElementPtr());
+ ASSERT_THROW(DataSourceClientContainer("sqlite3", config),
+ DataSourceConfigError);
+
+ config->set("database_file", Element::create(1));
+ ASSERT_THROW(DataSourceClientContainer("sqlite3", config),
+ DataSourceConfigError);
+
+ config->set("database_file", Element::create("/foo/bar/doesnotexist"));
+ ASSERT_THROW(DataSourceClientContainer("sqlite3", config),
+ SQLite3Error);
+
+ config->set("database_file", Element::create(SQLITE_DBFILE_EXAMPLE_ORG));
+ DataSourceClientContainer dsc("sqlite3", config);
+
+ DataSourceClient::FindResult result1(
+ dsc.getInstance().findZone(isc::dns::Name("example.org.")));
+ ASSERT_EQ(result::SUCCESS, result1.code);
+
+ DataSourceClient::FindResult result2(
+ dsc.getInstance().findZone(isc::dns::Name("no.such.zone.")));
+ ASSERT_EQ(result::NOTFOUND, result2.code);
+
+ ZoneIteratorPtr iterator(dsc.getInstance().getIterator(
+ isc::dns::Name("example.org.")));
+
+ ZoneUpdaterPtr updater(dsc.getInstance().getUpdater(
+ isc::dns::Name("example.org."), false));
+}
+
+TEST(FactoryTest, memoryClient) {
+ // We start out by building the configuration data bit by bit,
+ // testing each form of 'bad config', until we have a good one.
+ // Then we do some very basic operation on the client (detailed
+ // tests are left to the implementation-specific backends)
+ ElementPtr config;
+ ASSERT_THROW(DataSourceClientContainer client("memory", config),
+ DataSourceConfigError);
+
+ config = Element::create("asdf");
+ ASSERT_THROW(DataSourceClientContainer("memory", config),
+ DataSourceConfigError);
+
+ config = Element::createMap();
+ ASSERT_THROW(DataSourceClientContainer("memory", config),
+ DataSourceConfigError);
+
+ config->set("type", ElementPtr());
+ ASSERT_THROW(DataSourceClientContainer("memory", config),
+ DataSourceConfigError);
+
+ config->set("type", Element::create(1));
+ ASSERT_THROW(DataSourceClientContainer("memory", config),
+ DataSourceConfigError);
+
+ config->set("type", Element::create("FOO"));
+ ASSERT_THROW(DataSourceClientContainer("memory", config),
+ DataSourceConfigError);
+
+ config->set("type", Element::create("memory"));
+ ASSERT_THROW(DataSourceClientContainer("memory", config),
+ DataSourceConfigError);
+
+ config->set("class", ElementPtr());
+ ASSERT_THROW(DataSourceClientContainer("memory", config),
+ DataSourceConfigError);
+
+ config->set("class", Element::create(1));
+ ASSERT_THROW(DataSourceClientContainer("memory", config),
+ DataSourceConfigError);
+
+ config->set("class", Element::create("FOO"));
+ ASSERT_THROW(DataSourceClientContainer("memory", config),
+ DataSourceConfigError);
+
+ config->set("class", Element::create("IN"));
+ ASSERT_THROW(DataSourceClientContainer("memory", config),
+ DataSourceConfigError);
+
+ config->set("zones", ElementPtr());
+ ASSERT_THROW(DataSourceClientContainer("memory", config),
+ DataSourceConfigError);
+
+ config->set("zones", Element::create(1));
+ ASSERT_THROW(DataSourceClientContainer("memory", config),
+ DataSourceConfigError);
+
+ config->set("zones", Element::createList());
+ DataSourceClientContainer dsc("memory", config);
+
+ // Once it is able to load some zones, we should add a few tests
+ // here to see that it does.
+ DataSourceClient::FindResult result(
+ dsc.getInstance().findZone(isc::dns::Name("no.such.zone.")));
+ ASSERT_EQ(result::NOTFOUND, result.code);
+
+ ASSERT_THROW(dsc.getInstance().getIterator(isc::dns::Name("example.org.")),
+ DataSourceError);
+
+ ASSERT_THROW(dsc.getInstance().getUpdater(isc::dns::Name("no.such.zone."),
+ false), isc::NotImplemented);
+}
+
+TEST(FactoryTest, badType) {
+ ASSERT_THROW(DataSourceClientContainer("foo", ElementPtr()),
+ DataSourceError);
+}
+
+} // end anonymous namespace
+
diff --git a/src/lib/datasrc/tests/memory_datasrc_unittest.cc b/src/lib/datasrc/tests/memory_datasrc_unittest.cc
index 22723fc..2b854db 100644
--- a/src/lib/datasrc/tests/memory_datasrc_unittest.cc
+++ b/src/lib/datasrc/tests/memory_datasrc_unittest.cc
@@ -29,6 +29,8 @@
#include <dns/masterload.h>
#include <datasrc/memory_datasrc.h>
+#include <datasrc/data_source.h>
+#include <datasrc/iterator.h>
#include <gtest/gtest.h>
@@ -138,6 +140,43 @@ TEST_F(InMemoryClientTest, add_find_Zone) {
getOrigin());
}
+TEST_F(InMemoryClientTest, iterator) {
+ // Just some preparations of data
+ boost::shared_ptr<InMemoryZoneFinder>
+ zone(new InMemoryZoneFinder(RRClass::IN(), Name("a")));
+ RRsetPtr aRRsetA(new RRset(Name("a"), RRClass::IN(), RRType::A(),
+ RRTTL(300)));
+ aRRsetA->addRdata(rdata::in::A("192.0.2.1"));
+ RRsetPtr aRRsetAAAA(new RRset(Name("a"), RRClass::IN(), RRType::AAAA(),
+ RRTTL(300)));
+ aRRsetAAAA->addRdata(rdata::in::AAAA("2001:db8::1"));
+ aRRsetAAAA->addRdata(rdata::in::AAAA("2001:db8::2"));
+ RRsetPtr subRRsetA(new RRset(Name("sub.x.a"), RRClass::IN(), RRType::A(),
+ RRTTL(300)));
+ subRRsetA->addRdata(rdata::in::A("192.0.2.2"));
+ EXPECT_EQ(result::SUCCESS, memory_client.addZone(zone));
+ // First, the zone is not there, so it should throw
+ EXPECT_THROW(memory_client.getIterator(Name("b")), DataSourceError);
+ // This zone is not there either, even when there's a zone containing this
+ EXPECT_THROW(memory_client.getIterator(Name("x.a")), DataSourceError);
+ // Now, an empty zone
+ ZoneIteratorPtr iterator(memory_client.getIterator(Name("a")));
+ EXPECT_EQ(ConstRRsetPtr(), iterator->getNextRRset());
+ // It throws Unexpected when we are past the end
+ EXPECT_THROW(iterator->getNextRRset(), isc::Unexpected);
+ EXPECT_EQ(result::SUCCESS, zone->add(aRRsetA));
+ EXPECT_EQ(result::SUCCESS, zone->add(aRRsetAAAA));
+ EXPECT_EQ(result::SUCCESS, zone->add(subRRsetA));
+ // Check it with full zone, one by one.
+ // It should be in ascending order in case of InMemory data source
+ // (isn't guaranteed in general)
+ iterator = memory_client.getIterator(Name("a"));
+ EXPECT_EQ(aRRsetA, iterator->getNextRRset());
+ EXPECT_EQ(aRRsetAAAA, iterator->getNextRRset());
+ EXPECT_EQ(subRRsetA, iterator->getNextRRset());
+ EXPECT_EQ(ConstRRsetPtr(), iterator->getNextRRset());
+}
+
TEST_F(InMemoryClientTest, getZoneCount) {
EXPECT_EQ(0, memory_client.getZoneCount());
memory_client.addZone(
@@ -158,6 +197,11 @@ TEST_F(InMemoryClientTest, getZoneCount) {
EXPECT_EQ(2, memory_client.getZoneCount());
}
+TEST_F(InMemoryClientTest, startUpdateZone) {
+ EXPECT_THROW(memory_client.getUpdater(Name("example.org"), false),
+ isc::NotImplemented);
+}
+
// A helper callback of masterLoad() used in InMemoryZoneFinderTest.
void
setRRset(RRsetPtr rrset, vector<RRsetPtr*>::iterator& it) {
@@ -351,6 +395,14 @@ public:
};
/**
+ * \brief Check that findPreviousName throws as it should now.
+ */
+TEST_F(InMemoryZoneFinderTest, findPreviousName) {
+ EXPECT_THROW(zone_finder_.findPreviousName(Name("www.example.org")),
+ isc::NotImplemented);
+}
+
+/**
* \brief Test InMemoryZoneFinder::InMemoryZoneFinder constructor.
*
* Takes the created zone finder and checks its properties they are the same
@@ -1058,5 +1110,4 @@ TEST_F(InMemoryZoneFinderTest, getFileName) {
EXPECT_EQ(TEST_DATA_DIR "/root.zone", zone_finder_.getFileName());
EXPECT_TRUE(rootzone.getFileName().empty());
}
-
}
diff --git a/src/lib/datasrc/tests/sqlite3_accessor_unittest.cc b/src/lib/datasrc/tests/sqlite3_accessor_unittest.cc
index 097c821..3974977 100644
--- a/src/lib/datasrc/tests/sqlite3_accessor_unittest.cc
+++ b/src/lib/datasrc/tests/sqlite3_accessor_unittest.cc
@@ -11,6 +11,10 @@
// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
// PERFORMANCE OF THIS SOFTWARE.
+
+#include <algorithm>
+#include <vector>
+
#include <datasrc/sqlite3_accessor.h>
#include <datasrc/data_source.h>
@@ -19,8 +23,12 @@
#include <gtest/gtest.h>
#include <boost/scoped_ptr.hpp>
+#include <fstream>
+#include <sqlite3.h>
+using namespace std;
using namespace isc::datasrc;
+using boost::shared_ptr;
using isc::data::ConstElementPtr;
using isc::data::Element;
using isc::dns::RRClass;
@@ -35,6 +43,7 @@ std::string SQLITE_DBFILE_EXAMPLE_ROOT = TEST_DATA_DIR "/test-root.sqlite3";
std::string SQLITE_DBNAME_EXAMPLE_ROOT = "sqlite3_test-root.sqlite3";
std::string SQLITE_DBFILE_BROKENDB = TEST_DATA_DIR "/brokendb.sqlite3";
std::string SQLITE_DBFILE_MEMORY = ":memory:";
+std::string SQLITE_DBFILE_EXAMPLE_ORG = TEST_DATA_DIR "/example.org.sqlite3";
// The following file must be non existent and must be non"creatable";
// the sqlite3 library will try to create a new DB file if it doesn't exist,
@@ -42,75 +51,169 @@ std::string SQLITE_DBFILE_MEMORY = ":memory:";
// The "nodir", a non existent directory, is inserted for this purpose.
std::string SQLITE_DBFILE_NOTEXIST = TEST_DATA_DIR "/nodir/notexist";
+// new db file, we don't need this to be a std::string, and given the
+// raw calls we use it in a const char* is more convenient
+const char* SQLITE_NEW_DBFILE = TEST_DATA_BUILDDIR "/newdb.sqlite3";
+
// Opening works (the content is tested in different tests)
TEST(SQLite3Open, common) {
- EXPECT_NO_THROW(SQLite3Database db(SQLITE_DBFILE_EXAMPLE,
- RRClass::IN()));
+ EXPECT_NO_THROW(SQLite3Accessor accessor(SQLITE_DBFILE_EXAMPLE,
+ RRClass::IN()));
}
// The file can't be opened
TEST(SQLite3Open, notExist) {
- EXPECT_THROW(SQLite3Database db(SQLITE_DBFILE_NOTEXIST,
- RRClass::IN()), SQLite3Error);
+ EXPECT_THROW(SQLite3Accessor accessor(SQLITE_DBFILE_NOTEXIST,
+ RRClass::IN()), SQLite3Error);
}
// It rejects broken DB
TEST(SQLite3Open, brokenDB) {
- EXPECT_THROW(SQLite3Database db(SQLITE_DBFILE_BROKENDB,
- RRClass::IN()), SQLite3Error);
+ EXPECT_THROW(SQLite3Accessor accessor(SQLITE_DBFILE_BROKENDB,
+ RRClass::IN()), SQLite3Error);
}
// Test we can create the schema on the fly
TEST(SQLite3Open, memoryDB) {
- EXPECT_NO_THROW(SQLite3Database db(SQLITE_DBFILE_MEMORY,
- RRClass::IN()));
+ EXPECT_NO_THROW(SQLite3Accessor accessor(SQLITE_DBFILE_MEMORY,
+ RRClass::IN()));
}
// Test fixture for querying the db
-class SQLite3Access : public ::testing::Test {
+class SQLite3AccessorTest : public ::testing::Test {
public:
- SQLite3Access() {
+ SQLite3AccessorTest() {
initAccessor(SQLITE_DBFILE_EXAMPLE, RRClass::IN());
}
// So it can be re-created with different data
void initAccessor(const std::string& filename, const RRClass& rrclass) {
- db.reset(new SQLite3Database(filename, rrclass));
+ accessor.reset(new SQLite3Accessor(filename, rrclass));
}
- // The tested dbection
- boost::scoped_ptr<SQLite3Database> db;
+ // The tested accessor
+ boost::shared_ptr<SQLite3Accessor> accessor;
};
// This zone exists in the data, so it should be found
-TEST_F(SQLite3Access, getZone) {
- std::pair<bool, int> result(db->getZone(Name("example.com")));
+TEST_F(SQLite3AccessorTest, getZone) {
+ std::pair<bool, int> result(accessor->getZone("example.com."));
EXPECT_TRUE(result.first);
EXPECT_EQ(1, result.second);
}
// But it should find only the zone, nothing below it
-TEST_F(SQLite3Access, subZone) {
- EXPECT_FALSE(db->getZone(Name("sub.example.com")).first);
+TEST_F(SQLite3AccessorTest, subZone) {
+ EXPECT_FALSE(accessor->getZone("sub.example.com.").first);
}
// This zone is not there at all
-TEST_F(SQLite3Access, noZone) {
- EXPECT_FALSE(db->getZone(Name("example.org")).first);
+TEST_F(SQLite3AccessorTest, noZone) {
+ EXPECT_FALSE(accessor->getZone("example.org.").first);
}
// This zone is there, but in different class
-TEST_F(SQLite3Access, noClass) {
+TEST_F(SQLite3AccessorTest, noClass) {
initAccessor(SQLITE_DBFILE_EXAMPLE, RRClass::CH());
- EXPECT_FALSE(db->getZone(Name("example.com")).first);
+ EXPECT_FALSE(accessor->getZone("example.com.").first);
+}
+
+// This tests the iterator context
+TEST_F(SQLite3AccessorTest, iterator) {
+ // Our test zone is conveniently small, but not empty
+ initAccessor(SQLITE_DBFILE_EXAMPLE_ORG, RRClass::IN());
+
+ const std::pair<bool, int> zone_info(accessor->getZone("example.org."));
+ ASSERT_TRUE(zone_info.first);
+
+ // Get the iterator context
+ DatabaseAccessor::IteratorContextPtr
+ context(accessor->getAllRecords(zone_info.second));
+ ASSERT_NE(DatabaseAccessor::IteratorContextPtr(), context);
+
+ std::string data[DatabaseAccessor::COLUMN_COUNT];
+    // Get and check the first record
+ EXPECT_TRUE(context->getNext(data));
+ EXPECT_EQ("DNAME", data[DatabaseAccessor::TYPE_COLUMN]);
+ EXPECT_EQ("3600", data[DatabaseAccessor::TTL_COLUMN]);
+ EXPECT_EQ("dname.example.info.", data[DatabaseAccessor::RDATA_COLUMN]);
+ EXPECT_EQ("dname.example.org.", data[DatabaseAccessor::NAME_COLUMN]);
+
+ EXPECT_TRUE(context->getNext(data));
+ EXPECT_EQ("DNAME", data[DatabaseAccessor::TYPE_COLUMN]);
+ EXPECT_EQ("3600", data[DatabaseAccessor::TTL_COLUMN]);
+ EXPECT_EQ("dname2.example.info.", data[DatabaseAccessor::RDATA_COLUMN]);
+ EXPECT_EQ("dname2.foo.example.org.", data[DatabaseAccessor::NAME_COLUMN]);
+
+ EXPECT_TRUE(context->getNext(data));
+ EXPECT_EQ("MX", data[DatabaseAccessor::TYPE_COLUMN]);
+ EXPECT_EQ("3600", data[DatabaseAccessor::TTL_COLUMN]);
+ EXPECT_EQ("10 mail.example.org.", data[DatabaseAccessor::RDATA_COLUMN]);
+ EXPECT_EQ("example.org.", data[DatabaseAccessor::NAME_COLUMN]);
+
+ EXPECT_TRUE(context->getNext(data));
+ EXPECT_EQ("NS", data[DatabaseAccessor::TYPE_COLUMN]);
+ EXPECT_EQ("3600", data[DatabaseAccessor::TTL_COLUMN]);
+ EXPECT_EQ("ns1.example.org.", data[DatabaseAccessor::RDATA_COLUMN]);
+ EXPECT_EQ("example.org.", data[DatabaseAccessor::NAME_COLUMN]);
+
+ EXPECT_TRUE(context->getNext(data));
+ EXPECT_EQ("NS", data[DatabaseAccessor::TYPE_COLUMN]);
+ EXPECT_EQ("3600", data[DatabaseAccessor::TTL_COLUMN]);
+ EXPECT_EQ("ns2.example.org.", data[DatabaseAccessor::RDATA_COLUMN]);
+ EXPECT_EQ("example.org.", data[DatabaseAccessor::NAME_COLUMN]);
+
+ EXPECT_TRUE(context->getNext(data));
+ EXPECT_EQ("NS", data[DatabaseAccessor::TYPE_COLUMN]);
+ EXPECT_EQ("3600", data[DatabaseAccessor::TTL_COLUMN]);
+ EXPECT_EQ("ns3.example.org.", data[DatabaseAccessor::RDATA_COLUMN]);
+ EXPECT_EQ("example.org.", data[DatabaseAccessor::NAME_COLUMN]);
+
+ EXPECT_TRUE(context->getNext(data));
+ EXPECT_EQ("SOA", data[DatabaseAccessor::TYPE_COLUMN]);
+ EXPECT_EQ("3600", data[DatabaseAccessor::TTL_COLUMN]);
+ EXPECT_EQ("ns1.example.org. admin.example.org. "
+ "1234 3600 1800 2419200 7200",
+ data[DatabaseAccessor::RDATA_COLUMN]);
+ EXPECT_EQ("example.org.", data[DatabaseAccessor::NAME_COLUMN]);
+
+ EXPECT_TRUE(context->getNext(data));
+ EXPECT_EQ("A", data[DatabaseAccessor::TYPE_COLUMN]);
+ EXPECT_EQ("3600", data[DatabaseAccessor::TTL_COLUMN]);
+ EXPECT_EQ("192.0.2.10", data[DatabaseAccessor::RDATA_COLUMN]);
+ EXPECT_EQ("mail.example.org.", data[DatabaseAccessor::NAME_COLUMN]);
+
+ EXPECT_TRUE(context->getNext(data));
+ EXPECT_EQ("A", data[DatabaseAccessor::TYPE_COLUMN]);
+ EXPECT_EQ("3600", data[DatabaseAccessor::TTL_COLUMN]);
+ EXPECT_EQ("192.0.2.101", data[DatabaseAccessor::RDATA_COLUMN]);
+ EXPECT_EQ("ns.sub.example.org.", data[DatabaseAccessor::NAME_COLUMN]);
+
+ EXPECT_TRUE(context->getNext(data));
+ EXPECT_EQ("NS", data[DatabaseAccessor::TYPE_COLUMN]);
+ EXPECT_EQ("3600", data[DatabaseAccessor::TTL_COLUMN]);
+ EXPECT_EQ("ns.sub.example.org.", data[DatabaseAccessor::RDATA_COLUMN]);
+ EXPECT_EQ("sub.example.org.", data[DatabaseAccessor::NAME_COLUMN]);
+
+ EXPECT_TRUE(context->getNext(data));
+ EXPECT_EQ("A", data[DatabaseAccessor::TYPE_COLUMN]);
+ EXPECT_EQ("3600", data[DatabaseAccessor::TTL_COLUMN]);
+ EXPECT_EQ("192.0.2.1", data[DatabaseAccessor::RDATA_COLUMN]);
+ EXPECT_EQ("www.example.org.", data[DatabaseAccessor::NAME_COLUMN]);
+
+ // Check there's no other
+ EXPECT_FALSE(context->getNext(data));
+
+ // And make sure calling it again won't cause problems.
+ EXPECT_FALSE(context->getNext(data));
}
TEST(SQLite3Open, getDBNameExample2) {
- SQLite3Database db(SQLITE_DBFILE_EXAMPLE2, RRClass::IN());
- EXPECT_EQ(SQLITE_DBNAME_EXAMPLE2, db.getDBName());
+ SQLite3Accessor accessor(SQLITE_DBFILE_EXAMPLE2, RRClass::IN());
+ EXPECT_EQ(SQLITE_DBNAME_EXAMPLE2, accessor.getDBName());
}
TEST(SQLite3Open, getDBNameExampleROOT) {
- SQLite3Database db(SQLITE_DBFILE_EXAMPLE_ROOT, RRClass::IN());
- EXPECT_EQ(SQLITE_DBNAME_EXAMPLE_ROOT, db.getDBName());
+ SQLite3Accessor accessor(SQLITE_DBFILE_EXAMPLE_ROOT, RRClass::IN());
+ EXPECT_EQ(SQLITE_DBNAME_EXAMPLE_ROOT, accessor.getDBName());
}
// Simple function to count the number of records for
@@ -120,104 +223,97 @@ checkRecordRow(const std::string columns[],
const std::string& field0,
const std::string& field1,
const std::string& field2,
- const std::string& field3)
+ const std::string& field3,
+ const std::string& field4)
{
- EXPECT_EQ(field0, columns[0]);
- EXPECT_EQ(field1, columns[1]);
- EXPECT_EQ(field2, columns[2]);
- EXPECT_EQ(field3, columns[3]);
+ EXPECT_EQ(field0, columns[DatabaseAccessor::TYPE_COLUMN]);
+ EXPECT_EQ(field1, columns[DatabaseAccessor::TTL_COLUMN]);
+ EXPECT_EQ(field2, columns[DatabaseAccessor::SIGTYPE_COLUMN]);
+ EXPECT_EQ(field3, columns[DatabaseAccessor::RDATA_COLUMN]);
+ EXPECT_EQ(field4, columns[DatabaseAccessor::NAME_COLUMN]);
}
-TEST_F(SQLite3Access, getRecords) {
- const std::pair<bool, int> zone_info(db->getZone(Name("example.com")));
+TEST_F(SQLite3AccessorTest, getRecords) {
+ const std::pair<bool, int> zone_info(accessor->getZone("example.com."));
ASSERT_TRUE(zone_info.first);
const int zone_id = zone_info.second;
ASSERT_EQ(1, zone_id);
- const size_t column_count = DatabaseAccessor::COLUMN_COUNT;
- std::string columns[column_count];
-
- // without search, getNext() should return false
- EXPECT_FALSE(db->getNextRecord(columns, column_count));
- checkRecordRow(columns, "", "", "", "");
+ std::string columns[DatabaseAccessor::COLUMN_COUNT];
- db->searchForRecords(zone_id, "foo.bar.");
- EXPECT_FALSE(db->getNextRecord(columns, column_count));
- checkRecordRow(columns, "", "", "", "");
-
- db->searchForRecords(zone_id, "");
- EXPECT_FALSE(db->getNextRecord(columns, column_count));
- checkRecordRow(columns, "", "", "", "");
-
- // Should error on a bad number of columns
- EXPECT_THROW(db->getNextRecord(columns, 3), DataSourceError);
- EXPECT_THROW(db->getNextRecord(columns, 5), DataSourceError);
+ DatabaseAccessor::IteratorContextPtr
+ context(accessor->getRecords("foo.bar", 1));
+ ASSERT_NE(DatabaseAccessor::IteratorContextPtr(),
+ context);
+ EXPECT_FALSE(context->getNext(columns));
+ checkRecordRow(columns, "", "", "", "", "");
// now try some real searches
- db->searchForRecords(zone_id, "foo.example.com.");
- ASSERT_TRUE(db->getNextRecord(columns, column_count));
+ context = accessor->getRecords("foo.example.com.", zone_id);
+ ASSERT_TRUE(context->getNext(columns));
checkRecordRow(columns, "CNAME", "3600", "",
- "cnametest.example.org.");
- ASSERT_TRUE(db->getNextRecord(columns, column_count));
+ "cnametest.example.org.", "");
+ ASSERT_TRUE(context->getNext(columns));
checkRecordRow(columns, "RRSIG", "3600", "CNAME",
"CNAME 5 3 3600 20100322084538 20100220084538 33495 "
- "example.com. FAKEFAKEFAKEFAKE");
- ASSERT_TRUE(db->getNextRecord(columns, column_count));
+ "example.com. FAKEFAKEFAKEFAKE", "");
+ ASSERT_TRUE(context->getNext(columns));
checkRecordRow(columns, "NSEC", "7200", "",
- "mail.example.com. CNAME RRSIG NSEC");
- ASSERT_TRUE(db->getNextRecord(columns, column_count));
+ "mail.example.com. CNAME RRSIG NSEC", "");
+ ASSERT_TRUE(context->getNext(columns));
checkRecordRow(columns, "RRSIG", "7200", "NSEC",
"NSEC 5 3 7200 20100322084538 20100220084538 33495 "
- "example.com. FAKEFAKEFAKEFAKE");
- EXPECT_FALSE(db->getNextRecord(columns, column_count));
+ "example.com. FAKEFAKEFAKEFAKE", "");
+ EXPECT_FALSE(context->getNext(columns));
+
// with no more records, the array should not have been modified
checkRecordRow(columns, "RRSIG", "7200", "NSEC",
"NSEC 5 3 7200 20100322084538 20100220084538 33495 "
- "example.com. FAKEFAKEFAKEFAKE");
+ "example.com. FAKEFAKEFAKEFAKE", "");
- db->searchForRecords(zone_id, "example.com.");
- ASSERT_TRUE(db->getNextRecord(columns, column_count));
+ context = accessor->getRecords("example.com.", zone_id);
+ ASSERT_TRUE(context->getNext(columns));
checkRecordRow(columns, "SOA", "3600", "",
"master.example.com. admin.example.com. "
- "1234 3600 1800 2419200 7200");
- ASSERT_TRUE(db->getNextRecord(columns, column_count));
+ "1234 3600 1800 2419200 7200", "");
+ ASSERT_TRUE(context->getNext(columns));
checkRecordRow(columns, "RRSIG", "3600", "SOA",
"SOA 5 2 3600 20100322084538 20100220084538 "
- "33495 example.com. FAKEFAKEFAKEFAKE");
- ASSERT_TRUE(db->getNextRecord(columns, column_count));
- checkRecordRow(columns, "NS", "1200", "", "dns01.example.com.");
- ASSERT_TRUE(db->getNextRecord(columns, column_count));
- checkRecordRow(columns, "NS", "3600", "", "dns02.example.com.");
- ASSERT_TRUE(db->getNextRecord(columns, column_count));
- checkRecordRow(columns, "NS", "1800", "", "dns03.example.com.");
- ASSERT_TRUE(db->getNextRecord(columns, column_count));
+ "33495 example.com. FAKEFAKEFAKEFAKE", "");
+ ASSERT_TRUE(context->getNext(columns));
+ checkRecordRow(columns, "NS", "1200", "", "dns01.example.com.", "");
+ ASSERT_TRUE(context->getNext(columns));
+ checkRecordRow(columns, "NS", "3600", "", "dns02.example.com.", "");
+ ASSERT_TRUE(context->getNext(columns));
+ checkRecordRow(columns, "NS", "1800", "", "dns03.example.com.", "");
+ ASSERT_TRUE(context->getNext(columns));
checkRecordRow(columns, "RRSIG", "3600", "NS",
"NS 5 2 3600 20100322084538 20100220084538 "
- "33495 example.com. FAKEFAKEFAKEFAKE");
- ASSERT_TRUE(db->getNextRecord(columns, column_count));
- checkRecordRow(columns, "MX", "3600", "", "10 mail.example.com.");
- ASSERT_TRUE(db->getNextRecord(columns, column_count));
+ "33495 example.com. FAKEFAKEFAKEFAKE", "");
+ ASSERT_TRUE(context->getNext(columns));
+ checkRecordRow(columns, "MX", "3600", "", "10 mail.example.com.", "");
+ ASSERT_TRUE(context->getNext(columns));
checkRecordRow(columns, "MX", "3600", "",
- "20 mail.subzone.example.com.");
- ASSERT_TRUE(db->getNextRecord(columns, column_count));
+ "20 mail.subzone.example.com.", "");
+ ASSERT_TRUE(context->getNext(columns));
checkRecordRow(columns, "RRSIG", "3600", "MX",
"MX 5 2 3600 20100322084538 20100220084538 "
- "33495 example.com. FAKEFAKEFAKEFAKE");
- ASSERT_TRUE(db->getNextRecord(columns, column_count));
+ "33495 example.com. FAKEFAKEFAKEFAKE", "");
+ ASSERT_TRUE(context->getNext(columns));
checkRecordRow(columns, "NSEC", "7200", "",
- "cname-ext.example.com. NS SOA MX RRSIG NSEC DNSKEY");
- ASSERT_TRUE(db->getNextRecord(columns, column_count));
+ "cname-ext.example.com. NS SOA MX RRSIG NSEC DNSKEY", "");
+ ASSERT_TRUE(context->getNext(columns));
checkRecordRow(columns, "RRSIG", "7200", "NSEC",
"NSEC 5 2 7200 20100322084538 20100220084538 "
- "33495 example.com. FAKEFAKEFAKEFAKE");
- ASSERT_TRUE(db->getNextRecord(columns, column_count));
+ "33495 example.com. FAKEFAKEFAKEFAKE", "");
+ ASSERT_TRUE(context->getNext(columns));
checkRecordRow(columns, "DNSKEY", "3600", "",
"256 3 5 AwEAAcOUBllYc1hf7ND9uDy+Yz1BF3sI0m4q NGV7W"
"cTD0WEiuV7IjXgHE36fCmS9QsUxSSOV o1I/FMxI2PJVqTYHkX"
"FBS7AzLGsQYMU7UjBZ SotBJ6Imt5pXMu+lEDNy8TOUzG3xm7g"
- "0qcbW YF6qCEfvZoBtAqi5Rk7Mlrqs8agxYyMx");
- ASSERT_TRUE(db->getNextRecord(columns, column_count));
+ "0qcbW YF6qCEfvZoBtAqi5Rk7Mlrqs8agxYyMx", "");
+ ASSERT_TRUE(context->getNext(columns));
checkRecordRow(columns, "DNSKEY", "3600", "",
"257 3 5 AwEAAe5WFbxdCPq2jZrZhlMj7oJdff3W7syJ tbvzg"
"62tRx0gkoCDoBI9DPjlOQG0UAbj+xUV 4HQZJStJaZ+fHU5AwV"
@@ -226,20 +322,452 @@ TEST_F(SQLite3Access, getRecords) {
"qiODyNZYQ+ZrLmF0KIJ2yPN3iO6Zq 23TaOrVTjB7d1a/h31OD"
"fiHAxFHrkY3t3D5J R9Nsl/7fdRmSznwtcSDgLXBoFEYmw6p86"
"Acv RyoYNcL1SXjaKVLG5jyU3UR+LcGZT5t/0xGf oIK/aKwEN"
- "rsjcKZZj660b1M=");
- ASSERT_TRUE(db->getNextRecord(columns, column_count));
+ "rsjcKZZj660b1M=", "");
+ ASSERT_TRUE(context->getNext(columns));
checkRecordRow(columns, "RRSIG", "3600", "DNSKEY",
"DNSKEY 5 2 3600 20100322084538 20100220084538 "
- "4456 example.com. FAKEFAKEFAKEFAKE");
- ASSERT_TRUE(db->getNextRecord(columns, column_count));
+ "4456 example.com. FAKEFAKEFAKEFAKE", "");
+ ASSERT_TRUE(context->getNext(columns));
checkRecordRow(columns, "RRSIG", "3600", "DNSKEY",
"DNSKEY 5 2 3600 20100322084538 20100220084538 "
- "33495 example.com. FAKEFAKEFAKEFAKE");
- EXPECT_FALSE(db->getNextRecord(columns, column_count));
+ "33495 example.com. FAKEFAKEFAKEFAKE", "");
+ EXPECT_FALSE(context->getNext(columns));
// getNext() returning false should mean array is not altered
checkRecordRow(columns, "RRSIG", "3600", "DNSKEY",
"DNSKEY 5 2 3600 20100322084538 20100220084538 "
- "33495 example.com. FAKEFAKEFAKEFAKE");
+ "33495 example.com. FAKEFAKEFAKEFAKE", "");
+
+ // check that another getNext does not cause problems
+ EXPECT_FALSE(context->getNext(columns));
+
+ // Try searching for subdomain
+ // There's foo.bar.example.com in the data
+ context = accessor->getRecords("bar.example.com.", zone_id, true);
+ ASSERT_TRUE(context->getNext(columns));
+ checkRecordRow(columns, "A", "3600", "", "192.0.2.1", "");
+ EXPECT_FALSE(context->getNext(columns));
+ // But we shouldn't match mix.example.com here
+ context = accessor->getRecords("ix.example.com.", zone_id, true);
+ EXPECT_FALSE(context->getNext(columns));
+}
+
+TEST_F(SQLite3AccessorTest, findPrevious) {
+ EXPECT_EQ("dns01.example.com.",
+ accessor->findPreviousName(1, "com.example.dns02."));
+ // A name that doesn't exist
+ EXPECT_EQ("dns01.example.com.",
+ accessor->findPreviousName(1, "com.example.dns01x."));
+ // Largest name
+ EXPECT_EQ("www.example.com.",
+ accessor->findPreviousName(1, "com.example.wwww"));
+ // Out of zone after the last name
+ EXPECT_EQ("www.example.com.",
+ accessor->findPreviousName(1, "org.example."));
+ // Case insensitive?
+ EXPECT_EQ("dns01.example.com.",
+ accessor->findPreviousName(1, "com.exaMple.DNS02."));
+ // A name that doesn't exist
+ EXPECT_EQ("dns01.example.com.",
+ accessor->findPreviousName(1, "com.exaMple.DNS01X."));
+ // The DB contains foo.bar.example.com., which would be in between
+ // these two names. However, that one does not have an NSEC record,
+ // which is how this database recognizes glue data, so it should
+ // be skipped.
+ EXPECT_EQ("example.com.",
+ accessor->findPreviousName(1, "com.example.cname-ext."));
+ // Throw when we are before the origin
+ EXPECT_THROW(accessor->findPreviousName(1, "com.example."),
+ isc::NotImplemented);
+ EXPECT_THROW(accessor->findPreviousName(1, "a.example."),
+ isc::NotImplemented);
+}
+
+TEST_F(SQLite3AccessorTest, findPreviousNoData) {
+ // This one doesn't hold any NSEC records, so it shouldn't work
+ // The underlying DB/data don't support DNSSEC, so it's not implemented
+ // (does it make sense? Or different exception here?)
+ EXPECT_THROW(accessor->findPreviousName(3, "com.example.sql2.www."),
+ isc::NotImplemented);
+}
+
+// Test fixture for creating a db that automatically deletes it before start,
+// and when done
+class SQLite3Create : public ::testing::Test {
+public:
+ SQLite3Create() {
+ remove(SQLITE_NEW_DBFILE);
+ }
+
+ ~SQLite3Create() {
+ remove(SQLITE_NEW_DBFILE);
+ }
+};
+
+bool isReadable(const char* filename) {
+ return (std::ifstream(filename).is_open());
+}
+
+TEST_F(SQLite3Create, creationtest) {
+ ASSERT_FALSE(isReadable(SQLITE_NEW_DBFILE));
+ // Should simply be created
+ SQLite3Accessor accessor(SQLITE_NEW_DBFILE, RRClass::IN());
+ ASSERT_TRUE(isReadable(SQLITE_NEW_DBFILE));
+}
+
+TEST_F(SQLite3Create, emptytest) {
+ ASSERT_FALSE(isReadable(SQLITE_NEW_DBFILE));
+
+    // open one manually
+ sqlite3* db;
+ ASSERT_EQ(SQLITE_OK, sqlite3_open(SQLITE_NEW_DBFILE, &db));
+
+ // empty, but not locked, so creating it now should work
+ SQLite3Accessor accessor2(SQLITE_NEW_DBFILE, RRClass::IN());
+
+ sqlite3_close(db);
+
+ // should work now that we closed it
+ SQLite3Accessor accessor3(SQLITE_NEW_DBFILE, RRClass::IN());
+}
+
+TEST_F(SQLite3Create, lockedtest) {
+ ASSERT_FALSE(isReadable(SQLITE_NEW_DBFILE));
+
+ // open one manually
+ sqlite3* db;
+ ASSERT_EQ(SQLITE_OK, sqlite3_open(SQLITE_NEW_DBFILE, &db));
+ sqlite3_exec(db, "BEGIN EXCLUSIVE TRANSACTION", NULL, NULL, NULL);
+
+ // should not be able to open it
+ EXPECT_THROW(SQLite3Accessor accessor2(SQLITE_NEW_DBFILE, RRClass::IN()),
+ SQLite3Error);
+
+ sqlite3_exec(db, "ROLLBACK TRANSACTION", NULL, NULL, NULL);
+
+    // should work now that we rolled back the transaction
+ SQLite3Accessor accessor3(SQLITE_NEW_DBFILE, RRClass::IN());
}
+TEST_F(SQLite3AccessorTest, clone) {
+ shared_ptr<DatabaseAccessor> cloned = accessor->clone();
+ EXPECT_EQ(accessor->getDBName(), cloned->getDBName());
+
+ // The cloned accessor should have a separate connection and search
+ // context, so it should be able to perform search in concurrent with
+ // the original accessor.
+ string columns1[DatabaseAccessor::COLUMN_COUNT];
+ string columns2[DatabaseAccessor::COLUMN_COUNT];
+
+ const std::pair<bool, int> zone_info1(
+ accessor->getZone("example.com."));
+ DatabaseAccessor::IteratorContextPtr iterator1 =
+ accessor->getRecords("foo.example.com.", zone_info1.second);
+ const std::pair<bool, int> zone_info2(
+ accessor->getZone("example.com."));
+ DatabaseAccessor::IteratorContextPtr iterator2 =
+ cloned->getRecords("foo.example.com.", zone_info2.second);
+
+ ASSERT_TRUE(iterator1->getNext(columns1));
+ checkRecordRow(columns1, "CNAME", "3600", "", "cnametest.example.org.",
+ "");
+
+ ASSERT_TRUE(iterator2->getNext(columns2));
+ checkRecordRow(columns2, "CNAME", "3600", "", "cnametest.example.org.",
+ "");
+}
+
+//
+// Commonly used data for update tests
+//
+const char* const common_expected_data[] = {
+ // Test record already stored in the tested sqlite3 DB file.
+ "foo.bar.example.com.", "com.example.bar.foo.", "3600", "A", "",
+ "192.0.2.1"
+};
+const char* const new_data[] = {
+ // Newly added data commonly used by some of the tests below
+ "newdata.example.com.", "com.example.newdata.", "3600", "A", "",
+ "192.0.2.1"
+};
+const char* const deleted_data[] = {
+ // Existing data to be removed commonly used by some of the tests below
+ "foo.bar.example.com.", "A", "192.0.2.1"
+};
+
+class SQLite3Update : public SQLite3AccessorTest {
+protected:
+ SQLite3Update() {
+ // Note: if "installing" the test file fails some of the subsequent
+ // tests would fail.
+ const char *install_cmd = INSTALL_PROG " " TEST_DATA_DIR
+ "/test.sqlite3 " TEST_DATA_BUILDDIR
+ "/test.sqlite3.copied";
+ if (system(install_cmd) != 0) {
+ // any exception will do, this is failure in test setup, but nice
+ // to show the command that fails, and shouldn't be caught
+ isc_throw(isc::Exception,
+ "Error setting up; command failed: " << install_cmd);
+ };
+ initAccessor(TEST_DATA_BUILDDIR "/test.sqlite3.copied", RRClass::IN());
+ zone_id = accessor->getZone("example.com.").second;
+ another_accessor.reset(new SQLite3Accessor(
+ TEST_DATA_BUILDDIR "/test.sqlite3.copied",
+ RRClass::IN()));
+ expected_stored.push_back(common_expected_data);
+ }
+
+ int zone_id;
+ std::string get_columns[DatabaseAccessor::COLUMN_COUNT];
+ std::string add_columns[DatabaseAccessor::ADD_COLUMN_COUNT];
+ std::string del_params[DatabaseAccessor::DEL_PARAM_COUNT];
+
+ vector<const char* const*> expected_stored; // placeholder for checkRecords
+ vector<const char* const*> empty_stored; // indicate no corresponding data
+
+ // Another accessor, emulating one running on a different process/thread
+ shared_ptr<SQLite3Accessor> another_accessor;
+ DatabaseAccessor::IteratorContextPtr iterator;
+};
+
+void
+checkRecords(SQLite3Accessor& accessor, int zone_id, const std::string& name,
+ vector<const char* const*> expected_rows)
+{
+ DatabaseAccessor::IteratorContextPtr iterator =
+ accessor.getRecords(name, zone_id);
+ std::string columns[DatabaseAccessor::COLUMN_COUNT];
+ vector<const char* const*>::const_iterator it = expected_rows.begin();
+ while (iterator->getNext(columns)) {
+ ASSERT_TRUE(it != expected_rows.end());
+ checkRecordRow(columns, (*it)[3], (*it)[2], (*it)[4], (*it)[5], "");
+ ++it;
+ }
+ EXPECT_TRUE(it == expected_rows.end());
+}
+
+TEST_F(SQLite3Update, emptyUpdate) {
+ // If we do nothing between start and commit, the zone content
+ // should be intact.
+
+ checkRecords(*accessor, zone_id, "foo.bar.example.com.", expected_stored);
+ zone_id = accessor->startUpdateZone("example.com.", false).second;
+ checkRecords(*accessor, zone_id, "foo.bar.example.com.", expected_stored);
+ accessor->commitUpdateZone();
+ checkRecords(*accessor, zone_id, "foo.bar.example.com.", expected_stored);
+}
+
+TEST_F(SQLite3Update, flushZone) {
+ // With 'replace' being true startUpdateZone() will flush the existing
+ // zone content.
+
+ checkRecords(*accessor, zone_id, "foo.bar.example.com.", expected_stored);
+ zone_id = accessor->startUpdateZone("example.com.", true).second;
+ checkRecords(*accessor, zone_id, "foo.bar.example.com.", empty_stored);
+ accessor->commitUpdateZone();
+ checkRecords(*accessor, zone_id, "foo.bar.example.com.", empty_stored);
+}
+
+TEST_F(SQLite3Update, readWhileUpdate) {
+ zone_id = accessor->startUpdateZone("example.com.", true).second;
+ checkRecords(*accessor, zone_id, "foo.bar.example.com.", empty_stored);
+
+ // Until commit is done, the other accessor should see the old data
+ checkRecords(*another_accessor, zone_id, "foo.bar.example.com.",
+ expected_stored);
+
+ // Once the changes are committed, the other accessor will see the new
+ // data.
+ accessor->commitUpdateZone();
+ checkRecords(*another_accessor, zone_id, "foo.bar.example.com.",
+ empty_stored);
+}
+
+TEST_F(SQLite3Update, rollback) {
+ zone_id = accessor->startUpdateZone("example.com.", true).second;
+ checkRecords(*accessor, zone_id, "foo.bar.example.com.", empty_stored);
+
+ // Rollback will revert the change made by startUpdateZone(, true).
+ accessor->rollbackUpdateZone();
+ checkRecords(*accessor, zone_id, "foo.bar.example.com.", expected_stored);
+}
+
+TEST_F(SQLite3Update, rollbackFailure) {
+ // This test emulates a rare scenario of making rollback attempt fail.
+ // The iterator is paused in the middle of getting records, which prevents
+ // the rollback operation at the end of the test.
+
+ string columns[DatabaseAccessor::COLUMN_COUNT];
+ iterator = accessor->getRecords("example.com.", zone_id);
+ EXPECT_TRUE(iterator->getNext(columns));
+
+ accessor->startUpdateZone("example.com.", true);
+ EXPECT_THROW(accessor->rollbackUpdateZone(), DataSourceError);
+}
+
+TEST_F(SQLite3Update, commitConflict) {
+    // Start reading the DB by another accessor. We should stop at a single
+    // call to getNext() to keep holding the lock.
+ iterator = another_accessor->getRecords("foo.example.com.", zone_id);
+ EXPECT_TRUE(iterator->getNext(get_columns));
+
+    // Due to getNext() above, the other accessor holds a DB lock,
+ // which will prevent commit.
+ zone_id = accessor->startUpdateZone("example.com.", true).second;
+ checkRecords(*accessor, zone_id, "foo.bar.example.com.", empty_stored);
+ EXPECT_THROW(accessor->commitUpdateZone(), DataSourceError);
+ accessor->rollbackUpdateZone(); // rollback should still succeed
+
+ checkRecords(*accessor, zone_id, "foo.bar.example.com.", expected_stored);
+}
+
+TEST_F(SQLite3Update, updateConflict) {
+ // Similar to the previous case, but this is a conflict with another
+ // update attempt. Note that these two accessors modify disjoint sets
+ // of data; sqlite3 only has a coarse-grained lock so we cannot allow
+ // these updates to run concurrently.
+ EXPECT_TRUE(another_accessor->startUpdateZone("sql1.example.com.",
+ true).first);
+ EXPECT_THROW(accessor->startUpdateZone("example.com.", true),
+ DataSourceError);
+ checkRecords(*accessor, zone_id, "foo.bar.example.com.", expected_stored);
+
+ // Once we rollback the other attempt of change, we should be able to
+ // start and commit the transaction using the main accessor.
+ another_accessor->rollbackUpdateZone();
+ accessor->startUpdateZone("example.com.", true);
+ accessor->commitUpdateZone();
+}
+
+TEST_F(SQLite3Update, duplicateUpdate) {
+ accessor->startUpdateZone("example.com.", false);
+ EXPECT_THROW(accessor->startUpdateZone("example.com.", false),
+ DataSourceError);
+}
+
+TEST_F(SQLite3Update, commitWithoutTransaction) {
+ EXPECT_THROW(accessor->commitUpdateZone(), DataSourceError);
+}
+
+TEST_F(SQLite3Update, rollbackWithoutTransaction) {
+ EXPECT_THROW(accessor->rollbackUpdateZone(), DataSourceError);
+}
+
+TEST_F(SQLite3Update, addRecord) {
+ // Before update, there should be no record for this name
+ checkRecords(*accessor, zone_id, "newdata.example.com.", empty_stored);
+
+ zone_id = accessor->startUpdateZone("example.com.", false).second;
+ copy(new_data, new_data + DatabaseAccessor::ADD_COLUMN_COUNT,
+ add_columns);
+ accessor->addRecordToZone(add_columns);
+
+ expected_stored.clear();
+ expected_stored.push_back(new_data);
+ checkRecords(*accessor, zone_id, "newdata.example.com.", expected_stored);
+
+ // Commit the change, and confirm the new data is still there.
+ accessor->commitUpdateZone();
+ checkRecords(*accessor, zone_id, "newdata.example.com.", expected_stored);
+}
+
+TEST_F(SQLite3Update, addThenRollback) {
+ zone_id = accessor->startUpdateZone("example.com.", false).second;
+ copy(new_data, new_data + DatabaseAccessor::ADD_COLUMN_COUNT,
+ add_columns);
+ accessor->addRecordToZone(add_columns);
+
+ expected_stored.clear();
+ expected_stored.push_back(new_data);
+ checkRecords(*accessor, zone_id, "newdata.example.com.", expected_stored);
+
+ accessor->rollbackUpdateZone();
+ checkRecords(*accessor, zone_id, "newdata.example.com.", empty_stored);
+}
+
+TEST_F(SQLite3Update, duplicateAdd) {
+ const char* const dup_data[] = {
+ "foo.bar.example.com.", "com.example.bar.foo.", "3600", "A", "",
+ "192.0.2.1"
+ };
+ expected_stored.clear();
+ expected_stored.push_back(dup_data);
+ checkRecords(*accessor, zone_id, "foo.bar.example.com.", expected_stored);
+
+ // Adding exactly the same data. As this backend is "dumb", another
+ // row of the same content will be inserted.
+ copy(dup_data, dup_data + DatabaseAccessor::ADD_COLUMN_COUNT,
+ add_columns);
+ zone_id = accessor->startUpdateZone("example.com.", false).second;
+ accessor->addRecordToZone(add_columns);
+ expected_stored.push_back(dup_data);
+ checkRecords(*accessor, zone_id, "foo.bar.example.com.", expected_stored);
+}
+
+TEST_F(SQLite3Update, invalidAdd) {
+ // An attempt of add before an explicit start of transaction
+ EXPECT_THROW(accessor->addRecordToZone(add_columns), DataSourceError);
+}
+
+TEST_F(SQLite3Update, deleteRecord) {
+ zone_id = accessor->startUpdateZone("example.com.", false).second;
+
+ checkRecords(*accessor, zone_id, "foo.bar.example.com.", expected_stored);
+
+ copy(deleted_data, deleted_data + DatabaseAccessor::DEL_PARAM_COUNT,
+ del_params);
+ accessor->deleteRecordInZone(del_params);
+ checkRecords(*accessor, zone_id, "foo.bar.example.com.", empty_stored);
+
+ // Commit the change, and confirm the deleted data still isn't there.
+ accessor->commitUpdateZone();
+ checkRecords(*accessor, zone_id, "foo.bar.example.com.", empty_stored);
+}
+
+TEST_F(SQLite3Update, deleteThenRollback) {
+ zone_id = accessor->startUpdateZone("example.com.", false).second;
+
+ copy(deleted_data, deleted_data + DatabaseAccessor::DEL_PARAM_COUNT,
+ del_params);
+ accessor->deleteRecordInZone(del_params);
+ checkRecords(*accessor, zone_id, "foo.bar.example.com.", empty_stored);
+
+ // Rollback the change, and confirm the data still exists.
+ accessor->rollbackUpdateZone();
+ checkRecords(*accessor, zone_id, "foo.bar.example.com.", expected_stored);
+}
+
+TEST_F(SQLite3Update, deleteNonexistent) {
+ zone_id = accessor->startUpdateZone("example.com.", false).second;
+ copy(deleted_data, deleted_data + DatabaseAccessor::DEL_PARAM_COUNT,
+ del_params);
+
+ // Replace the name with a non existent one, then try to delete it.
+ // nothing should happen.
+ del_params[DatabaseAccessor::DEL_NAME] = "no-such-name.example.com.";
+ checkRecords(*accessor, zone_id, "no-such-name.example.com.",
+ empty_stored);
+ accessor->deleteRecordInZone(del_params);
+ checkRecords(*accessor, zone_id, "no-such-name.example.com.",
+ empty_stored);
+
+ // Name exists but the RR type is different. Delete attempt shouldn't
+ // delete only by name.
+ copy(deleted_data, deleted_data + DatabaseAccessor::DEL_PARAM_COUNT,
+ del_params);
+ del_params[DatabaseAccessor::DEL_TYPE] = "AAAA";
+ accessor->deleteRecordInZone(del_params);
+ checkRecords(*accessor, zone_id, "foo.bar.example.com.", expected_stored);
+
+ // Similar to the previous case, but RDATA is different.
+ copy(deleted_data, deleted_data + DatabaseAccessor::DEL_PARAM_COUNT,
+ del_params);
+ del_params[DatabaseAccessor::DEL_RDATA] = "192.0.2.2";
+ accessor->deleteRecordInZone(del_params);
+ checkRecords(*accessor, zone_id, "foo.bar.example.com.", expected_stored);
+}
+
+TEST_F(SQLite3Update, invalidDelete) {
+ // An attempt of delete before an explicit start of transaction
+ EXPECT_THROW(accessor->deleteRecordInZone(del_params), DataSourceError);
+}
} // end anonymous namespace
diff --git a/src/lib/datasrc/tests/static_unittest.cc b/src/lib/datasrc/tests/static_unittest.cc
index a11e889..4c9fe42 100644
--- a/src/lib/datasrc/tests/static_unittest.cc
+++ b/src/lib/datasrc/tests/static_unittest.cc
@@ -53,6 +53,7 @@ protected:
// NOTE: in addition, the order of the following items matter.
authors_data.push_back("Chen Zhengzhang");
+ authors_data.push_back("Dmitriy Volodin");
authors_data.push_back("Evan Hunt");
authors_data.push_back("Haidong Wang");
authors_data.push_back("Han Feng");
diff --git a/src/lib/datasrc/tests/testdata/Makefile.am b/src/lib/datasrc/tests/testdata/Makefile.am
new file mode 100644
index 0000000..64ae955
--- /dev/null
+++ b/src/lib/datasrc/tests/testdata/Makefile.am
@@ -0,0 +1,6 @@
+CLEANFILES = *.copied
+BUILT_SOURCES = rwtest.sqlite3.copied
+
+# We use install-sh with the -m option to make sure it's writable
+rwtest.sqlite3.copied: $(srcdir)/rwtest.sqlite3
+ $(top_srcdir)/install-sh -m 644 $(srcdir)/rwtest.sqlite3 $@
diff --git a/src/lib/datasrc/tests/testdata/rwtest.sqlite3 b/src/lib/datasrc/tests/testdata/rwtest.sqlite3
new file mode 100644
index 0000000..ce95a1d
Binary files /dev/null and b/src/lib/datasrc/tests/testdata/rwtest.sqlite3 differ
diff --git a/src/lib/datasrc/zone.h b/src/lib/datasrc/zone.h
index 0dacc5d..c83b14b 100644
--- a/src/lib/datasrc/zone.h
+++ b/src/lib/datasrc/zone.h
@@ -15,59 +15,89 @@
#ifndef __ZONE_H
#define __ZONE_H 1
-#include <datasrc/result.h>
+#include <dns/rrset.h>
#include <dns/rrsetlist.h>
+#include <datasrc/result.h>
+
namespace isc {
namespace datasrc {
-/// \brief The base class for a single authoritative zone
-///
-/// The \c Zone class is an abstract base class for representing
-/// a DNS zone as part of data source.
+/// \brief The base class to search a zone for RRsets
///
-/// At the moment this is provided mainly for making the \c ZoneTable class
-/// and the authoritative query logic testable, and only provides a minimal
-/// set of features.
-/// This is why this class is defined in the same header file, but it may
-/// have to move to a separate header file when we understand what is
-/// necessary for this class for actual operation.
+/// The \c ZoneFinder class is an abstract base class for representing
+/// an object that performs DNS lookups in a specific zone accessible via
+/// a data source. In general, different types of data sources (in-memory,
+/// database-based, etc) define their own derived classes of \c ZoneFinder,
+/// implementing ways to retrieve the required data through the common
+/// interfaces declared in the base class. Each concrete \c ZoneFinder
+/// object is therefore (conceptually) associated with a specific zone
+/// of one specific data source instance.
///
-/// The idea is to provide a specific derived zone class for each data
-/// source, beginning with in memory one. At that point the derived classes
-/// will have more specific features. For example, they will maintain
-/// information about the location of a zone file, whether it's loaded in
-/// memory, etc.
+/// The origin name and the RR class of the associated zone are available
+/// via the \c getOrigin() and \c getClass() methods, respectively.
///
-/// It's not yet clear how the derived zone classes work with various other
-/// data sources when we integrate these components, but one possibility is
-/// something like this:
-/// - If the underlying database such as some variant of SQL doesn't have an
-/// explicit representation of zones (as part of public interface), we can
-/// probably use a "default" zone class that simply encapsulates the
-/// corresponding data source and calls a common "find" like method.
-/// - Some data source may want to specialize it by inheritance as an
-/// optimization. For example, in the current schema design of the sqlite3
-/// data source, its (derived) zone class would contain the information of
-/// the "zone ID".
+/// The most important method of this class is \c find(), which performs
+/// the lookup for a given domain and type. See the description of the
+/// method for details.
///
-/// <b>Note:</b> Unlike some other abstract base classes we don't name the
-/// class beginning with "Abstract". This is because we want to have
-/// commonly used definitions such as \c Result and \c ZoneFinderPtr, and we
-/// want to make them look more intuitive.
+/// \note It's not clear whether we should request that a zone finder form a
+/// "transaction", that is, whether to ensure the finder is not susceptible
+/// to changes made by someone else than the creator of the finder. If we
+/// don't request that, for example, two different lookup results for the
+/// same name and type can be different if other threads or programs make
+/// updates to the zone between the lookups. We should revisit this point
+/// as we gain more experiences.
class ZoneFinder {
public:
/// Result codes of the \c find() method.
///
/// Note: the codes are tentative. We may need more, or we may find
/// some of them unnecessary as we implement more details.
+ ///
+ /// Some are synonyms of others in terms of RCODE returned to user.
+ /// But they help the logic to decide if it should ask for a NSEC
+ /// that covers something or not (for example, in case of NXRRSET,
+ /// the directly returned NSEC is sufficient, but with wildcard one,
+ /// we need to add one proving there's no exact match and this is
+ /// actually the best wildcard we have). Data sources that don't
+ /// support DNSSEC don't need to distinguish them.
+ ///
+ /// In case of NXRRSET related results, the returned NSEC record
+ /// belongs to the domain which would provide the result if it
+ /// contained the correct type (in case of NXRRSET, it is the queried
+ /// domain, in case of WILDCARD_NXRRSET, it is the wildcard domain
+ /// that matched the query name). In case of an empty nonterminal,
+ /// an NSEC is provided for the interval where the empty nonterminal
+ /// lives. The end of the interval is the subdomain causing existence
+ /// of the empty nonterminal (if there's sub.x.example.com, and no record
+ /// in x.example.com, then x.example.com exists implicitly - is the empty
+ /// nonterminal and sub.x.example.com is the subdomain causing it).
+ ///
+ /// Examples: if zone "example.com" has the following record:
+ /// \code
+ /// a.b.example.com. NSEC c.example.com.
+ /// \endcode
+ /// a call to \c find() for "b.example.com." will result in NXRRSET,
+ /// and if the FIND_DNSSEC option is set this NSEC will be returned.
+ /// Likewise, if zone "example.org" has the following record,
+ /// \code
+ /// x.*.example.org. NSEC a.example.org.
+ /// \endcode
+ /// a call to \c find() for "y.example.org" will result in
+ /// WILDCARD_NXRRSET (*.example.org is an empty nonterminal wildcard node),
+ /// and if the FIND_DNSSEC option is set this NSEC will be returned.
+ ///
+ /// In case of NXDOMAIN, the returned NSEC covers the queried domain.
enum Result {
SUCCESS, ///< An exact match is found.
DELEGATION, ///< The search encounters a zone cut.
NXDOMAIN, ///< There is no domain name that matches the search name
NXRRSET, ///< There is a matching name but no RRset of the search type
CNAME, ///< The search encounters and returns a CNAME RR
- DNAME ///< The search encounters and returns a DNAME RR
+ DNAME, ///< The search encounters and returns a DNAME RR
+ WILDCARD, ///< Success by wildcard match, for DNSSEC
+ WILDCARD_NXRRSET ///< NXRRSET on wildcard, for DNSSEC
};
/// A helper structure to represent the search result of \c find().
@@ -107,7 +137,11 @@ public:
/// performed on these values to express compound options.
enum FindOptions {
FIND_DEFAULT = 0, ///< The default options
- FIND_GLUE_OK = 1 ///< Allow search under a zone cut
+ FIND_GLUE_OK = 1, ///< Allow search under a zone cut
+ FIND_DNSSEC = 2 ///< Require DNSSEC data in the answer
+ ///< (RRSIG, NSEC, etc.). The implementation
+ ///< is allowed to include it even if it is
+ ///< not set.
};
///
@@ -138,7 +172,7 @@ public:
//@}
///
- /// \name Search Method
+ /// \name Search Methods
///
//@{
/// Search the zone for a given pair of domain name and RR type.
@@ -170,8 +204,8 @@ public:
/// We should revisit the interface before we heavily rely on it.
///
/// The \c options parameter specifies customized behavior of the search.
- /// Their semantics is as follows:
- /// - \c GLUE_OK Allow search under a zone cut. By default the search
+/// Their semantics is as follows (they are OR-able bit-field values):
+ /// - \c FIND_GLUE_OK Allow search under a zone cut. By default the search
/// will stop once it encounters a zone cut. If this option is specified
/// it remembers information about the highest zone cut and continues
/// the search until it finds an exact match for the given name or it
@@ -179,6 +213,9 @@ public:
/// RRsets for that name are searched just like the normal case;
/// otherwise, if the search has encountered a zone cut, \c DELEGATION
/// with the information of the highest zone cut will be returned.
+ /// - \c FIND_DNSSEC Request that DNSSEC data (like NSEC, RRSIGs) are
+ /// returned with the answer. It is allowed for the data source to
+ /// include them even when not requested.
///
/// A derived version of this method may involve internal resource
/// allocation, especially for constructing the resulting RRset, and may
@@ -198,17 +235,272 @@ public:
isc::dns::RRsetList* target = NULL,
const FindOptions options
= FIND_DEFAULT) = 0;
+
+ /// \brief Get previous name in the zone
+ ///
+ /// Gets the previous name in the DNSSEC order. This can be used
+ /// to find the correct NSEC records for proving nonexistence
+ /// of domains.
+ ///
+ /// The concrete implementation might throw anything it thinks appropriate,
+ /// however it is recommended to stick to the ones listed here. The user
+ /// of this method should be able to handle any exceptions.
+ ///
+ /// This method does not include under-zone-cut data (glue data).
+ ///
+ /// \param query The name for which we look for a previous one. The
+ /// queried name doesn't have to exist in the zone.
+ /// \return The preceding name
+ ///
+ /// \throw NotImplemented in case the data source backend doesn't support
+ /// DNSSEC or there is no previous in the zone (NSEC records might be
+ /// missing in the DB, the queried name is less or equal to the apex).
+ /// \throw DataSourceError for low-level or internal datasource errors
+ /// (like broken connection to database, wrong data living there).
+ /// \throw std::bad_alloc For allocation errors.
+ virtual isc::dns::Name findPreviousName(const isc::dns::Name& query)
+ const = 0;
//@}
};
+/// \brief Operator to combine FindOptions
+///
+/// We would need to manually static-cast the options if we put or
+/// between them, which is undesired with bit-flag options. Therefore
+/// we hide the cast here, which is the simplest solution and it still
+/// provides reasonable level of type safety.
+inline ZoneFinder::FindOptions operator |(ZoneFinder::FindOptions a,
+ ZoneFinder::FindOptions b)
+{
+ return (static_cast<ZoneFinder::FindOptions>(static_cast<unsigned>(a) |
+ static_cast<unsigned>(b)));
+}
+
/// \brief A pointer-like type pointing to a \c ZoneFinder object.
typedef boost::shared_ptr<ZoneFinder> ZoneFinderPtr;
/// \brief A pointer-like type pointing to a \c ZoneFinder object.
typedef boost::shared_ptr<const ZoneFinder> ConstZoneFinderPtr;
-}
-}
+/// The base class to make updates to a single zone.
+///
+/// On construction, each derived class object will start a "transaction"
+/// for making updates to a specific zone (this means a constructor of
+/// a derived class would normally take parameters to identify the zone
+/// to be updated). The underlying realization of a "transaction" will differ
+/// for different derived classes; if it uses a general purpose database
+/// as a backend, it will involve performing some form of "begin transaction"
+/// statement for the database.
+///
+/// Updates (adding or deleting RRs) are made via \c addRRset() and
+/// \c deleteRRset() methods. Until the \c commit() method is called the
+/// changes are local to the updater object. For example, they won't be
+/// visible via a \c ZoneFinder object except the one returned by the
+/// updater's own \c getFinder() method. The \c commit() completes the
+/// transaction and makes the changes visible to others.
+///
+/// This class does not provide an explicit "rollback" interface. If
+/// something wrong or unexpected happens during the updates and the
+/// caller wants to cancel the intermediate updates, the caller should
+/// simply destruct the updater object without calling \c commit().
+/// The destructor is supposed to perform the "rollback" operation,
+/// depending on the internal details of the derived class.
+///
+/// \note This initial implementation provides a quite simple interface of
+/// adding and deleting RRs (see the description of the related methods).
+/// It may be revisited as we gain more experiences.
+class ZoneUpdater {
+protected:
+ /// The default constructor.
+ ///
+ /// This is intentionally defined as protected to ensure that this base
+ /// class is never instantiated directly.
+ ZoneUpdater() {}
+
+public:
+ /// The destructor
+ ///
+ /// Each derived class implementation must ensure that if \c commit()
+ /// has not been performed by the time of the call to it, then it
+ /// "rollbacks" the updates made via the updater so far.
+ virtual ~ZoneUpdater() {}
+
+ /// Return a finder for the zone being updated.
+ ///
+ /// The returned finder provides the functionalities of \c ZoneFinder
+ /// for the zone as updates are made via the updater. That is, before
+ /// making any update, the finder will be able to find all RRsets that
+ /// exist in the zone at the time the updater is created. If RRsets
+ /// are added or deleted via \c addRRset() or \c deleteRRset(),
+ /// this finder will find the added ones or miss the deleted ones
+ /// respectively.
+ ///
+ /// The finder returned by this method is effective only while the updates
+ /// are performed, i.e., from the construction of the corresponding
+ /// updater until \c commit() is performed or the updater is destructed
+ /// without commit. The result of a subsequent call to this method (or
+ /// the use of the result) after that is undefined.
+ ///
+ /// \return A reference to a \c ZoneFinder for the updated zone
+ virtual ZoneFinder& getFinder() = 0;
+
+ /// Add an RRset to a zone via the updater
+ ///
+ /// This may be revisited in a future version, but right now the intended
+ /// behavior of this method is simple: It "naively" adds the specified
+ /// RRset to the zone specified on creation of the updater.
+ /// It performs minimum level of validation on the specified RRset:
+ /// - Whether the RR class is identical to that for the zone to be updated
+ /// - Whether the RRset is not empty, i.e., it has at least one RDATA
+ /// - Whether the RRset is not associated with an RRSIG, i.e.,
+ /// whether \c getRRsig() on the RRset returns a NULL pointer.
+ ///
+ /// and otherwise does not check any oddity. For example, it doesn't
+ /// check whether the owner name of the specified RRset is a subdomain
+ /// of the zone's origin; it doesn't care whether or not there is already
+ /// an RRset of the same name and RR type in the zone, and if there is,
+ /// whether any of the existing RRs have duplicate RDATA with the added
+ /// ones. If these conditions matter the calling application must examine
+ /// the existing data beforehand using the \c ZoneFinder returned by
+ /// \c getFinder().
+ ///
+ /// The validation requirement on the associated RRSIG is temporary.
+ /// If we find it more reasonable and useful to allow adding a pair of
+ /// RRset and its RRSIG RRset as we gain experiences with the interface,
+ /// we may remove this restriction. Until then we explicitly check it
+ /// to prevent accidental misuse.
+ ///
+ /// Conceptually, on successful call to this method, the zone will have
+ /// the specified RRset, and if there is already an RRset of the same
+ /// name and RR type, these two sets will be "merged". "Merged" means
+ /// that a subsequent call to \c ZoneFinder::find() for the name and type
+ /// will result in success and the returned RRset will contain all
+ /// previously existing and newly added RDATAs with the TTL being the
+ /// minimum of the two RRsets. The underlying representation of the
+ /// "merged" RRsets may vary depending on the characteristic of the
+ /// underlying data source. For example, if it uses a general purpose
+ /// database that stores each RR of the same RRset separately, it may
+ /// simply be a larger sets of RRs based on both the existing and added
+ /// RRsets; the TTLs of the RRs may be different within the database, and
+ /// there may even be duplicate RRs in different database rows. As long
+ /// as the RRset returned via \c ZoneFinder::find() conforms to the
+ /// concept of "merge", the actual internal representation is up to the
+ /// implementation.
+ ///
+ /// This method must not be called once commit() is performed. If it
+ /// is called after \c commit() the implementation must throw a
+ /// \c DataSourceError exception.
+ ///
+ /// \todo As noted above we may have to revisit the design details as we
+ /// gain experiences:
+ ///
+ /// - we may want to check (and maybe reject) if there is already a
+ /// duplicate RR (that has the same RDATA).
+ /// - we may want to check (and maybe reject) if there is already an
+ /// RRset of the same name and RR type with different TTL
+ /// - we may even want to check if there is already any RRset of the
+ /// same name and RR type.
+ /// - we may want to add an "options" parameter that can control the
+ /// above points
+ /// - we may want to have this method return a value containing the
+ /// information on whether there's a duplicate, etc.
+ ///
+ /// \exception DataSourceError Called after \c commit(), RRset is invalid
+ /// (see above), internal data source error
+ /// \exception std::bad_alloc Resource allocation failure
+ ///
+ /// \param rrset The RRset to be added
+ virtual void addRRset(const isc::dns::RRset& rrset) = 0;
+
+ /// Delete an RRset from a zone via the updater
+ ///
+ /// Like \c addRRset(), the detailed semantics and behavior of this method
+ /// may have to be revisited in a future version. The following are
+ /// based on the initial implementation decisions.
+ ///
+ /// On successful completion of this method, it will remove from the zone
+ /// the RRs of the specified owner name and RR type that match one of
+ /// the RDATAs of the specified RRset. There are several points to be
+ /// noted:
+ /// - Existing RRs that don't match any of the specified RDATAs will
+ /// remain in the zone.
+ /// - Any RRs of the specified RRset that doesn't exist in the zone will
+ /// simply be ignored; the implementation of this method is not supposed
+ /// to check that condition.
+ /// - The TTL of the RRset is ignored; matching is only performed by
+ /// the owner name, RR type and RDATA
+ ///
+ /// Ignoring the TTL may not look sensible, but it's based on the
+ /// observation that it will result in more intuitive result, especially
+ /// when the underlying data source is a general purpose database.
+ /// See also \c DatabaseAccessor::deleteRecordInZone() on this point.
+ /// It also matches the dynamic update protocol (RFC2136), where TTLs
+ /// are ignored when deleting RRs.
+ ///
+ /// \note Since the TTL is ignored, this method could take the RRset
+ /// to be deleted as a tuple of name, RR type, and a list of RDATAs.
+ /// But in practice, it's quite likely that the caller has the RRset
+ /// in the form of the \c RRset object (e.g., extracted from a dynamic
+ /// update request message), so this interface would rather be more
+ /// convenient. If it turns out not to be true we can change or extend
+ /// the method signature.
+ ///
+ /// This method performs minimum level of validation on the specified
+ /// RRset:
+ /// - Whether the RR class is identical to that for the zone to be updated
+ /// - Whether the RRset is not empty, i.e., it has at least one RDATA
+ /// - Whether the RRset is not associated with an RRSIG, i.e.,
+ /// whether \c getRRsig() on the RRset returns a NULL pointer.
+ ///
+ /// This method must not be called once commit() is performed. If it
+ /// is called after \c commit() the implementation must throw a
+ /// \c DataSourceError exception.
+ ///
+ /// \todo As noted above we may have to revisit the design details as we
+ /// gain experiences:
+ ///
+ /// - we may want to check (and maybe reject) if some or all of the RRs
+ /// for the specified RRset don't exist in the zone
+ /// - we may want to allow an option to "delete everything" for specified
+ /// name and/or specified name + RR type.
+ /// - as mentioned above, we may want to include the TTL in matching the
+ /// deleted RRs
+ /// - we may want to add an "options" parameter that can control the
+ /// above points
+ /// - we may want to have this method return a value containing the
+ /// information on whether there's any RRs that are specified but don't
+ /// exist, the number of actually deleted RRs, etc.
+ ///
+ /// \exception DataSourceError Called after \c commit(), RRset is invalid
+ /// (see above), internal data source error
+ /// \exception std::bad_alloc Resource allocation failure
+ ///
+ /// \param rrset The RRset to be deleted
+ virtual void deleteRRset(const isc::dns::RRset& rrset) = 0;
+
+ /// Commit the updates made in the updater to the zone
+ ///
+ /// This method completes the "transaction" started at the creation
+ /// of the updater. After successful completion of this method, the
+ /// updates will be visible outside the scope of the updater.
+ /// The actual internal behavior will differ for different derived classes.
+ /// For a derived class with a general purpose database as a backend,
+ /// for example, this method would perform a "commit" statement for the
+ /// database.
+ ///
+ /// This operation can only be performed at most once. A duplicate call
+ /// must result in a DataSourceError exception.
+ ///
+ /// \exception DataSourceError Duplicate call of the method,
+ /// internal data source error
+ virtual void commit() = 0;
+};
+
+/// \brief A pointer-like type pointing to a \c ZoneUpdater object.
+typedef boost::shared_ptr<ZoneUpdater> ZoneUpdaterPtr;
+
+} // end of datasrc
+} // end of isc
#endif // __ZONE_H
diff --git a/src/lib/dns/Makefile.am b/src/lib/dns/Makefile.am
index 10a731f..0d2bffd 100644
--- a/src/lib/dns/Makefile.am
+++ b/src/lib/dns/Makefile.am
@@ -24,14 +24,21 @@ EXTRA_DIST += rdata/generic/cname_5.h
EXTRA_DIST += rdata/generic/detail/nsec_bitmap.cc
EXTRA_DIST += rdata/generic/detail/nsec_bitmap.h
EXTRA_DIST += rdata/generic/detail/txt_like.h
+EXTRA_DIST += rdata/generic/detail/ds_like.h
+EXTRA_DIST += rdata/generic/dlv_32769.cc
+EXTRA_DIST += rdata/generic/dlv_32769.h
EXTRA_DIST += rdata/generic/dname_39.cc
EXTRA_DIST += rdata/generic/dname_39.h
EXTRA_DIST += rdata/generic/dnskey_48.cc
EXTRA_DIST += rdata/generic/dnskey_48.h
EXTRA_DIST += rdata/generic/ds_43.cc
EXTRA_DIST += rdata/generic/ds_43.h
+EXTRA_DIST += rdata/generic/hinfo_13.cc
+EXTRA_DIST += rdata/generic/hinfo_13.h
EXTRA_DIST += rdata/generic/mx_15.cc
EXTRA_DIST += rdata/generic/mx_15.h
+EXTRA_DIST += rdata/generic/naptr_35.cc
+EXTRA_DIST += rdata/generic/naptr_35.h
EXTRA_DIST += rdata/generic/ns_2.cc
EXTRA_DIST += rdata/generic/ns_2.h
EXTRA_DIST += rdata/generic/nsec3_50.cc
@@ -54,12 +61,18 @@ EXTRA_DIST += rdata/generic/spf_99.cc
EXTRA_DIST += rdata/generic/spf_99.h
EXTRA_DIST += rdata/generic/txt_16.cc
EXTRA_DIST += rdata/generic/txt_16.h
+EXTRA_DIST += rdata/generic/minfo_14.cc
+EXTRA_DIST += rdata/generic/minfo_14.h
+EXTRA_DIST += rdata/generic/afsdb_18.cc
+EXTRA_DIST += rdata/generic/afsdb_18.h
EXTRA_DIST += rdata/hs_4/a_1.cc
EXTRA_DIST += rdata/hs_4/a_1.h
EXTRA_DIST += rdata/in_1/a_1.cc
EXTRA_DIST += rdata/in_1/a_1.h
EXTRA_DIST += rdata/in_1/aaaa_28.cc
EXTRA_DIST += rdata/in_1/aaaa_28.h
+EXTRA_DIST += rdata/in_1/dhcid_49.cc
+EXTRA_DIST += rdata/in_1/dhcid_49.h
EXTRA_DIST += rdata/in_1/srv_33.cc
EXTRA_DIST += rdata/in_1/srv_33.h
#EXTRA_DIST += rdata/template.cc
@@ -93,9 +106,11 @@ libdns___la_SOURCES += tsig.h tsig.cc
libdns___la_SOURCES += tsigerror.h tsigerror.cc
libdns___la_SOURCES += tsigkey.h tsigkey.cc
libdns___la_SOURCES += tsigrecord.h tsigrecord.cc
+libdns___la_SOURCES += character_string.h character_string.cc
libdns___la_SOURCES += rdata/generic/detail/nsec_bitmap.h
libdns___la_SOURCES += rdata/generic/detail/nsec_bitmap.cc
libdns___la_SOURCES += rdata/generic/detail/txt_like.h
+libdns___la_SOURCES += rdata/generic/detail/ds_like.h
libdns___la_CPPFLAGS = $(AM_CPPFLAGS)
# Most applications of libdns++ will only implicitly rely on libcryptolink,
diff --git a/src/lib/dns/character_string.cc b/src/lib/dns/character_string.cc
new file mode 100644
index 0000000..3a289ac
--- /dev/null
+++ b/src/lib/dns/character_string.cc
@@ -0,0 +1,140 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include "character_string.h"
+#include "rdata.h"
+
+using namespace std;
+using namespace isc::dns::rdata;
+
+namespace isc {
+namespace dns {
+
+namespace {
+bool isDigit(char c) {
+ return (('0' <= c) && (c <= '9'));
+}
+}
+
+std::string
+characterstr::getNextCharacterString(const std::string& input_str,
+ std::string::const_iterator& input_iterator)
+{
+ string result;
+
+ // If the input string only contains white-spaces, it is an invalid
+ // <character-string>
+ if (input_iterator >= input_str.end()) {
+ isc_throw(InvalidRdataText, "Invalid text format, \
+ <character-string> field is missing.");
+ }
+
+ // Whether the <character-string> is separated with double quotes (")
+ bool quotes_separated = (*input_iterator == '"');
+ // Whether the quotes are paired if the string is quotes separated
+ bool quotes_paired = false;
+
+ if (quotes_separated) {
+ ++input_iterator;
+ }
+
+ while(input_iterator < input_str.end()){
+ // Escaped characters processing
+ if (*input_iterator == '\\') {
+ if (input_iterator + 1 == input_str.end()) {
+ isc_throw(InvalidRdataText, "<character-string> ended \
+ prematurely.");
+ } else {
+ if (isDigit(*(input_iterator + 1))) {
+ // \DDD where each D is a digit. It is the octet
+ // corresponding to the decimal number described by DDD
+ if (input_iterator + 3 >= input_str.end()) {
+ isc_throw(InvalidRdataText, "<character-string> ended \
+ prematurely.");
+ } else {
+ int n = 0;
+ ++input_iterator;
+ for (int i = 0; i < 3; ++i) {
+ if (isDigit(*input_iterator)) {
+ n = n*10 + (*input_iterator - '0');
+ ++input_iterator;
+ } else {
+ isc_throw(InvalidRdataText, "Illegal decimal \
+ escaping series");
+ }
+ }
+ if (n > 255) {
+ isc_throw(InvalidRdataText, "Illegal octet \
+ number");
+ }
+ result.push_back(n);
+ continue;
+ }
+ } else {
+ ++input_iterator;
+ result.push_back(*input_iterator);
+ ++input_iterator;
+ continue;
+ }
+ }
+ }
+
+ if (quotes_separated) {
+ // If the <character-string> is separated with quotes symbol and
+ // another quotes symbol is encountered, it is the end of the
+ // <character-string>
+ if (*input_iterator == '"') {
+ quotes_paired = true;
+ ++input_iterator;
+ // Reach the end of character string
+ break;
+ }
+ } else if (*input_iterator == ' ') {
+ // If the <character-string> is not separated with quotes symbol,
+ // it is separated with <space> char
+ break;
+ }
+
+ result.push_back(*input_iterator);
+
+ ++input_iterator;
+ }
+
+ if (result.size() > MAX_CHARSTRING_LEN) {
+ isc_throw(CharStringTooLong, "<character-string> is too long");
+ }
+
+ if (quotes_separated && !quotes_paired) {
+ isc_throw(InvalidRdataText, "The quotes are not paired");
+ }
+
+ return (result);
+}
+
+std::string
+characterstr::getNextCharacterString(util::InputBuffer& buffer, size_t len) {
+ uint8_t str_len = buffer.readUint8();
+
+ size_t pos = buffer.getPosition();
+ if (len - pos < str_len) {
+ isc_throw(InvalidRdataLength, "Invalid string length");
+ }
+
+ uint8_t buf[MAX_CHARSTRING_LEN];
+ buffer.readData(buf, str_len);
+ return (string(buf, buf + str_len));
+}
+
+} // end of namespace dns
+} // end of namespace isc
diff --git a/src/lib/dns/character_string.h b/src/lib/dns/character_string.h
new file mode 100644
index 0000000..7961274
--- /dev/null
+++ b/src/lib/dns/character_string.h
@@ -0,0 +1,57 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __CHARACTER_STRING_H
+#define __CHARACTER_STRING_H
+
+#include <string>
+#include <exceptions/exceptions.h>
+#include <util/buffer.h>
+
+namespace isc {
+namespace dns {
+
+// \brief Some utility functions to extract <character-string> from string
+// or InputBuffer
+//
+// <character-string> is expressed in one or two ways: as a contiguous set
+// of characters without interior spaces, or as a string beginning with a "
+// and ending with a ". Inside a " delimited string any character can
+// occur, except for a " itself, which must be quoted using \ (back slash).
+// Ref. RFC1035
+
+
+namespace characterstr {
+ /// Get a <character-string> from a string
+ ///
+ /// \param input_str The input string
+ /// \param input_iterator The iterator from which to start extracting,
+ /// the iterator will be updated to new position after the function
+ /// is returned
+ /// \return A std::string that contains the extracted <character-string>
+ std::string getNextCharacterString(const std::string& input_str,
+ std::string::const_iterator& input_iterator);
+
+ /// Get a <character-string> from a input buffer
+ ///
+ /// \param buffer The input buffer
+ /// \param len The input buffer total length
+ /// \return A std::string that contains the extracted <character-string>
+ std::string getNextCharacterString(util::InputBuffer& buffer, size_t len);
+
+} // namespace characterstr
+} // namespace dns
+} // namespace isc
+
+#endif // __CHARACTER_STRING_H
diff --git a/src/lib/dns/gen-rdatacode.py.in b/src/lib/dns/gen-rdatacode.py.in
index b3c8da2..f3cd5df 100755
--- a/src/lib/dns/gen-rdatacode.py.in
+++ b/src/lib/dns/gen-rdatacode.py.in
@@ -133,7 +133,15 @@ def import_definitions(classcode2txt, typecode2txt, typeandclass):
if classdir_mtime < getmtime('@srcdir@/rdata'):
classdir_mtime = getmtime('@srcdir@/rdata')
- for dir in list(os.listdir('@srcdir@/rdata')):
+ # Sort directories before iterating through them so that the directory
+ # list is processed in the same order on all systems. The resulting
+ # files should compile regardless of the order in which the components
+ # are included but... Having a fixed order for the directories should
+ # eliminate system-dependent problems. (Note that the directory names
+ # in BIND 10 are ASCII, so the order should be locale-independent.)
+ dirlist = os.listdir('@srcdir@/rdata')
+ dirlist.sort()
+ for dir in dirlist:
classdir = '@srcdir@/rdata' + os.sep + dir
m = re_typecode.match(dir)
if os.path.isdir(classdir) and (m != None or dir == 'generic'):
@@ -145,7 +153,12 @@ def import_definitions(classcode2txt, typecode2txt, typeandclass):
class_code = m.group(2)
if not class_code in classcode2txt:
classcode2txt[class_code] = class_txt
- for file in list(os.listdir(classdir)):
+
+ # Same considerations as directories regarding sorted order
+ # also apply to files.
+ filelist = os.listdir(classdir)
+ filelist.sort()
+ for file in filelist:
file = classdir + os.sep + file
m = re_typecode.match(os.path.split(file)[1])
if m != None:
diff --git a/src/lib/dns/message.cc b/src/lib/dns/message.cc
index c5ba4e1..b3e9229 100644
--- a/src/lib/dns/message.cc
+++ b/src/lib/dns/message.cc
@@ -124,10 +124,12 @@ public:
void setOpcode(const Opcode& opcode);
void setRcode(const Rcode& rcode);
int parseQuestion(InputBuffer& buffer);
- int parseSection(const Message::Section section, InputBuffer& buffer);
+ int parseSection(const Message::Section section, InputBuffer& buffer,
+ Message::ParseOptions options);
void addRR(Message::Section section, const Name& name,
const RRClass& rrclass, const RRType& rrtype,
- const RRTTL& ttl, ConstRdataPtr rdata);
+ const RRTTL& ttl, ConstRdataPtr rdata,
+ Message::ParseOptions options);
void addEDNS(Message::Section section, const Name& name,
const RRClass& rrclass, const RRType& rrtype,
const RRTTL& ttl, const Rdata& rdata);
@@ -614,7 +616,7 @@ Message::parseHeader(InputBuffer& buffer) {
}
void
-Message::fromWire(InputBuffer& buffer) {
+Message::fromWire(InputBuffer& buffer, ParseOptions options) {
if (impl_->mode_ != Message::PARSE) {
isc_throw(InvalidMessageOperation,
"Message parse attempted in non parse mode");
@@ -626,11 +628,11 @@ Message::fromWire(InputBuffer& buffer) {
impl_->counts_[SECTION_QUESTION] = impl_->parseQuestion(buffer);
impl_->counts_[SECTION_ANSWER] =
- impl_->parseSection(SECTION_ANSWER, buffer);
+ impl_->parseSection(SECTION_ANSWER, buffer, options);
impl_->counts_[SECTION_AUTHORITY] =
- impl_->parseSection(SECTION_AUTHORITY, buffer);
+ impl_->parseSection(SECTION_AUTHORITY, buffer, options);
impl_->counts_[SECTION_ADDITIONAL] =
- impl_->parseSection(SECTION_ADDITIONAL, buffer);
+ impl_->parseSection(SECTION_ADDITIONAL, buffer, options);
}
int
@@ -706,7 +708,7 @@ struct MatchRR : public unary_function<RRsetPtr, bool> {
// is hardcoded here.
int
MessageImpl::parseSection(const Message::Section section,
- InputBuffer& buffer)
+ InputBuffer& buffer, Message::ParseOptions options)
{
assert(section < MessageImpl::NUM_SECTIONS);
@@ -738,7 +740,7 @@ MessageImpl::parseSection(const Message::Section section,
addTSIG(section, count, buffer, start_position, name, rrclass, ttl,
*rdata);
} else {
- addRR(section, name, rrclass, rrtype, ttl, rdata);
+ addRR(section, name, rrclass, rrtype, ttl, rdata, options);
++added;
}
}
@@ -749,19 +751,22 @@ MessageImpl::parseSection(const Message::Section section,
void
MessageImpl::addRR(Message::Section section, const Name& name,
const RRClass& rrclass, const RRType& rrtype,
- const RRTTL& ttl, ConstRdataPtr rdata)
+ const RRTTL& ttl, ConstRdataPtr rdata,
+ Message::ParseOptions options)
{
- vector<RRsetPtr>::iterator it =
- find_if(rrsets_[section].begin(), rrsets_[section].end(),
- MatchRR(name, rrtype, rrclass));
- if (it != rrsets_[section].end()) {
- (*it)->setTTL(min((*it)->getTTL(), ttl));
- (*it)->addRdata(rdata);
- } else {
- RRsetPtr rrset(new RRset(name, rrclass, rrtype, ttl));
- rrset->addRdata(rdata);
- rrsets_[section].push_back(rrset);
+ if ((options & Message::PRESERVE_ORDER) == 0) {
+ vector<RRsetPtr>::iterator it =
+ find_if(rrsets_[section].begin(), rrsets_[section].end(),
+ MatchRR(name, rrtype, rrclass));
+ if (it != rrsets_[section].end()) {
+ (*it)->setTTL(min((*it)->getTTL(), ttl));
+ (*it)->addRdata(rdata);
+ return;
+ }
}
+ RRsetPtr rrset(new RRset(name, rrclass, rrtype, ttl));
+ rrset->addRdata(rdata);
+ rrsets_[section].push_back(rrset);
}
void
diff --git a/src/lib/dns/message.h b/src/lib/dns/message.h
index 6a8bf9f..f286c67 100644
--- a/src/lib/dns/message.h
+++ b/src/lib/dns/message.h
@@ -581,11 +581,58 @@ public:
/// message
void toWire(AbstractMessageRenderer& renderer, TSIGContext& tsig_ctx);
+ /// Parse options.
+ ///
+ /// describe PRESERVE_ORDER: note doesn't affect EDNS or TSIG.
+ ///
+ /// The option values are used as a parameter for \c fromWire().
+ /// These are values of a bitmask type. Bitwise operations can be
+ /// performed on these values to express compound options.
+ enum ParseOptions {
+ PARSE_DEFAULT = 0, ///< The default options
+ PRESERVE_ORDER = 1 ///< Preserve RR order and don't combine them
+ };
+
/// \brief Parse the header section of the \c Message.
void parseHeader(isc::util::InputBuffer& buffer);
- /// \brief Parse the \c Message.
- void fromWire(isc::util::InputBuffer& buffer);
+ /// \brief (Re)build a \c Message object from wire-format data.
+ ///
+ /// This method parses the given wire format data to build a
+ /// complete Message object. On success, the values of the header section
+ /// fields can be accessible via corresponding get methods, and the
+ /// question and following sections can be accessible via the
+ /// corresponding iterators. If the message contains an EDNS or TSIG,
+ /// they can be accessible via \c getEDNS() and \c getTSIGRecord(),
+ /// respectively.
+ ///
+ /// This \c Message must be in the \c PARSE mode.
+ ///
+ /// This method performs strict validation on the given message based
+ /// on the DNS protocol specifications. If the given message data is
+ /// invalid, this method throws an exception (see the exception list).
+ ///
+ /// By default, this method combines RRs of the same name, RR type and
+ /// RR class in a section into a single RRset, even if they are interleaved
+ /// with a different type of RR (though it would be a rare case in
+ /// practice). If the \c PRESERVE_ORDER option is specified, it handles
+ /// each RR separately, in the appearing order, and converts it to a
+ /// separate RRset (so this RRset should contain exactly one Rdata).
+ /// This mode will be necessary when the higher level protocol is
+ /// ordering conscious. For example, in AXFR and IXFR, the position of
+ /// the SOA RRs are crucial.
+ ///
+ /// \exception InvalidMessageOperation \c Message is in the RENDER mode
+ /// \exception DNSMessageFORMERR The given message data is syntactically
+ /// \exception MessageTooShort The given data is shorter than a valid
+ /// header section
+ /// \exception std::bad_alloc Memory allocation failure
+ /// \exception Others \c Name, \c Rdata, and \c EDNS classes can also throw
+ ///
+ /// \param buffer A input buffer object that stores the wire data
+ /// \param options Parse options
+ void fromWire(isc::util::InputBuffer& buffer, ParseOptions options
+ = PARSE_DEFAULT);
///
/// \name Protocol constants
@@ -629,6 +676,6 @@ std::ostream& operator<<(std::ostream& os, const Message& message);
}
#endif // __MESSAGE_H
-// Local Variables:
+// Local Variables:
// mode: c++
-// End:
+// End:
diff --git a/src/lib/dns/python/Makefile.am b/src/lib/dns/python/Makefile.am
index 6c4ef54..3b89358 100644
--- a/src/lib/dns/python/Makefile.am
+++ b/src/lib/dns/python/Makefile.am
@@ -4,40 +4,47 @@ AM_CPPFLAGS = -I$(top_srcdir)/src/lib -I$(top_builddir)/src/lib
AM_CPPFLAGS += $(BOOST_INCLUDES)
AM_CXXFLAGS = $(B10_CXXFLAGS)
-pyexec_LTLIBRARIES = pydnspp.la
-pydnspp_la_SOURCES = pydnspp.cc pydnspp_common.cc pydnspp_towire.h
-pydnspp_la_SOURCES += name_python.cc name_python.h
-pydnspp_la_SOURCES += messagerenderer_python.cc messagerenderer_python.h
-pydnspp_la_SOURCES += rcode_python.cc rcode_python.h
-pydnspp_la_SOURCES += tsigkey_python.cc tsigkey_python.h
-pydnspp_la_SOURCES += tsigerror_python.cc tsigerror_python.h
-pydnspp_la_SOURCES += tsig_rdata_python.cc tsig_rdata_python.h
-pydnspp_la_SOURCES += tsigrecord_python.cc tsigrecord_python.h
-pydnspp_la_SOURCES += tsig_python.cc tsig_python.h
+lib_LTLIBRARIES = libpydnspp.la
+libpydnspp_la_SOURCES = pydnspp_common.cc pydnspp_common.h pydnspp_towire.h
+libpydnspp_la_SOURCES += name_python.cc name_python.h
+libpydnspp_la_SOURCES += rrset_python.cc rrset_python.h
+libpydnspp_la_SOURCES += rrclass_python.cc rrclass_python.h
+libpydnspp_la_SOURCES += rrtype_python.cc rrtype_python.h
+libpydnspp_la_SOURCES += rrttl_python.cc rrttl_python.h
+libpydnspp_la_SOURCES += rdata_python.cc rdata_python.h
+libpydnspp_la_SOURCES += messagerenderer_python.cc messagerenderer_python.h
+libpydnspp_la_SOURCES += rcode_python.cc rcode_python.h
+libpydnspp_la_SOURCES += opcode_python.cc opcode_python.h
+libpydnspp_la_SOURCES += question_python.cc question_python.h
+libpydnspp_la_SOURCES += tsigkey_python.cc tsigkey_python.h
+libpydnspp_la_SOURCES += tsigerror_python.cc tsigerror_python.h
+libpydnspp_la_SOURCES += tsig_rdata_python.cc tsig_rdata_python.h
+libpydnspp_la_SOURCES += tsigrecord_python.cc tsigrecord_python.h
+libpydnspp_la_SOURCES += tsig_python.cc tsig_python.h
+libpydnspp_la_SOURCES += edns_python.cc edns_python.h
+libpydnspp_la_SOURCES += message_python.cc message_python.h
+
+libpydnspp_la_CPPFLAGS = $(AM_CPPFLAGS) $(PYTHON_INCLUDES)
+libpydnspp_la_CXXFLAGS = $(AM_CXXFLAGS) $(PYTHON_CXXFLAGS)
+libpydnspp_la_LDFLAGS = $(PYTHON_LDFLAGS)
+
+
+pyexec_LTLIBRARIES = pydnspp.la
+pydnspp_la_SOURCES = pydnspp.cc
pydnspp_la_CPPFLAGS = $(AM_CPPFLAGS) $(PYTHON_INCLUDES)
# Note: PYTHON_CXXFLAGS may have some -Wno... workaround, which must be
# placed after -Wextra defined in AM_CXXFLAGS
pydnspp_la_CXXFLAGS = $(AM_CXXFLAGS) $(PYTHON_CXXFLAGS)
pydnspp_la_LDFLAGS = $(PYTHON_LDFLAGS)
-# directly included from source files, so these don't have their own
-# rules
-EXTRA_DIST = pydnspp_common.h
-EXTRA_DIST += edns_python.cc
-EXTRA_DIST += message_python.cc
-EXTRA_DIST += rrclass_python.cc
-EXTRA_DIST += opcode_python.cc
-EXTRA_DIST += rrset_python.cc
-EXTRA_DIST += question_python.cc
-EXTRA_DIST += rrttl_python.cc
-EXTRA_DIST += rdata_python.cc
-EXTRA_DIST += rrtype_python.cc
-EXTRA_DIST += tsigerror_python_inc.cc
+EXTRA_DIST = tsigerror_python_inc.cc
+EXTRA_DIST += message_python_inc.cc
# Python prefers .so, while some OSes (specifically MacOS) use a different
# suffix for dynamic objects. -module is necessary to work this around.
pydnspp_la_LDFLAGS += -module
pydnspp_la_LIBADD = $(top_builddir)/src/lib/dns/libdns++.la
pydnspp_la_LIBADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
+pydnspp_la_LIBADD += libpydnspp.la
pydnspp_la_LIBADD += $(PYTHON_LIB)
diff --git a/src/lib/dns/python/edns_python.cc b/src/lib/dns/python/edns_python.cc
index 83c3bfa..8f0f1a4 100644
--- a/src/lib/dns/python/edns_python.cc
+++ b/src/lib/dns/python/edns_python.cc
@@ -12,38 +12,38 @@
// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
// PERFORMANCE OF THIS SOFTWARE.
+#include <Python.h>
+
#include <cassert>
#include <dns/edns.h>
+#include <dns/exceptions.h>
+#include <dns/messagerenderer.h>
+#include <util/python/pycppwrapper_util.h>
+
+#include "edns_python.h"
+#include "name_python.h"
+#include "rrclass_python.h"
+#include "rrtype_python.h"
+#include "rrttl_python.h"
+#include "rdata_python.h"
+#include "messagerenderer_python.h"
+#include "pydnspp_common.h"
using namespace isc::dns;
-using namespace isc::util;
using namespace isc::dns::rdata;
-
-//
-// Definition of the classes
-//
-
-// For each class, we need a struct, a helper functions (init, destroy,
-// and static wrappers around the methods we export), a list of methods,
-// and a type description
+using namespace isc::dns::python;
+using namespace isc::util;
+using namespace isc::util::python;
namespace {
-//
-// EDNS
-//
-
-// The s_* Class simply covers one instantiation of the object
class s_EDNS : public PyObject {
public:
- EDNS* edns;
+ EDNS* cppobj;
};
-//
-// We declare the functions here, the definitions are below
-// the type definition of the object, since both can use the other
-//
+typedef CPPPyObjectContainer<s_EDNS, EDNS> EDNSContainer;
// General creation and destruction
int EDNS_init(s_EDNS* self, PyObject* args);
@@ -103,60 +103,6 @@ PyMethodDef EDNS_methods[] = {
{ NULL, NULL, 0, NULL }
};
-// This defines the complete type for reflection in python and
-// parsing of PyObject* to s_EDNS
-// Most of the functions are not actually implemented and NULL here.
-PyTypeObject edns_type = {
- PyVarObject_HEAD_INIT(NULL, 0)
- "pydnspp.EDNS",
- sizeof(s_EDNS), // tp_basicsize
- 0, // tp_itemsize
- (destructor)EDNS_destroy, // tp_dealloc
- NULL, // tp_print
- NULL, // tp_getattr
- NULL, // tp_setattr
- NULL, // tp_reserved
- NULL, // tp_repr
- NULL, // tp_as_number
- NULL, // tp_as_sequence
- NULL, // tp_as_mapping
- NULL, // tp_hash
- NULL, // tp_call
- EDNS_str, // tp_str
- NULL, // tp_getattro
- NULL, // tp_setattro
- NULL, // tp_as_buffer
- Py_TPFLAGS_DEFAULT, // tp_flags
- "The EDNS class encapsulates DNS extensions "
- "provided by the EDNSx protocol.",
- NULL, // tp_traverse
- NULL, // tp_clear
- NULL, // tp_richcompare
- 0, // tp_weaklistoffset
- NULL, // tp_iter
- NULL, // tp_iternext
- EDNS_methods, // tp_methods
- NULL, // tp_members
- NULL, // tp_getset
- NULL, // tp_base
- NULL, // tp_dict
- NULL, // tp_descr_get
- NULL, // tp_descr_set
- 0, // tp_dictoffset
- (initproc)EDNS_init, // tp_init
- NULL, // tp_alloc
- PyType_GenericNew, // tp_new
- NULL, // tp_free
- NULL, // tp_is_gc
- NULL, // tp_bases
- NULL, // tp_mro
- NULL, // tp_cache
- NULL, // tp_subclasses
- NULL, // tp_weaklist
- NULL, // tp_del
- 0 // tp_version_tag
-};
-
EDNS*
createFromRR(const Name& name, const RRClass& rrclass, const RRType& rrtype,
const RRTTL& rrttl, const Rdata& rdata, uint8_t& extended_rcode)
@@ -179,15 +125,15 @@ createFromRR(const Name& name, const RRClass& rrclass, const RRType& rrtype,
int
EDNS_init(s_EDNS* self, PyObject* args) {
uint8_t version = EDNS::SUPPORTED_VERSION;
- const s_Name* name;
- const s_RRClass* rrclass;
- const s_RRType* rrtype;
- const s_RRTTL* rrttl;
- const s_Rdata* rdata;
+ const PyObject* name;
+ const PyObject* rrclass;
+ const PyObject* rrtype;
+ const PyObject* rrttl;
+ const PyObject* rdata;
if (PyArg_ParseTuple(args, "|b", &version)) {
try {
- self->edns = new EDNS(version);
+ self->cppobj = new EDNS(version);
} catch (const isc::InvalidParameter& ex) {
PyErr_SetString(po_InvalidParameter, ex.what());
return (-1);
@@ -203,10 +149,12 @@ EDNS_init(s_EDNS* self, PyObject* args) {
// in this context so that we can share the try-catch logic with
// EDNS_createFromRR() (see below).
uint8_t extended_rcode;
- self->edns = createFromRR(*name->cppobj, *rrclass->rrclass,
- *rrtype->rrtype, *rrttl->rrttl,
- *rdata->rdata, extended_rcode);
- return (self->edns != NULL ? 0 : -1);
+ self->cppobj = createFromRR(PyName_ToName(name),
+ PyRRClass_ToRRClass(rrclass),
+ PyRRType_ToRRType(rrtype),
+ PyRRTTL_ToRRTTL(rrttl),
+ PyRdata_ToRdata(rdata), extended_rcode);
+ return (self->cppobj != NULL ? 0 : -1);
}
PyErr_Clear();
@@ -217,19 +165,19 @@ EDNS_init(s_EDNS* self, PyObject* args) {
void
EDNS_destroy(s_EDNS* const self) {
- delete self->edns;
- self->edns = NULL;
+ delete self->cppobj;
+ self->cppobj = NULL;
Py_TYPE(self)->tp_free(self);
}
PyObject*
EDNS_toText(const s_EDNS* const self) {
// Py_BuildValue makes python objects from native data
- return (Py_BuildValue("s", self->edns->toText().c_str()));
+ return (Py_BuildValue("s", self->cppobj->toText().c_str()));
}
PyObject*
-EDNS_str(PyObject* const self) {
+EDNS_str(PyObject* self) {
// Simply call the to_text method we already defined
return (PyObject_CallMethod(self,
const_cast<char*>("to_text"),
@@ -240,14 +188,14 @@ PyObject*
EDNS_toWire(const s_EDNS* const self, PyObject* args) {
PyObject* bytes;
uint8_t extended_rcode;
- s_MessageRenderer* renderer;
+ PyObject* renderer;
if (PyArg_ParseTuple(args, "Ob", &bytes, &extended_rcode) &&
PySequence_Check(bytes)) {
PyObject* bytes_o = bytes;
-
+
OutputBuffer buffer(0);
- self->edns->toWire(buffer, extended_rcode);
+ self->cppobj->toWire(buffer, extended_rcode);
PyObject* rd_bytes = PyBytes_FromStringAndSize(
static_cast<const char*>(buffer.getData()), buffer.getLength());
PyObject* result = PySequence_InPlaceConcat(bytes_o, rd_bytes);
@@ -257,8 +205,8 @@ EDNS_toWire(const s_EDNS* const self, PyObject* args) {
return (result);
} else if (PyArg_ParseTuple(args, "O!b", &messagerenderer_type,
&renderer, &extended_rcode)) {
- const unsigned int n = self->edns->toWire(*renderer->messagerenderer,
- extended_rcode);
+ const unsigned int n = self->cppobj->toWire(
+ PyMessageRenderer_ToMessageRenderer(renderer), extended_rcode);
return (Py_BuildValue("I", n));
}
@@ -269,12 +217,12 @@ EDNS_toWire(const s_EDNS* const self, PyObject* args) {
PyObject*
EDNS_getVersion(const s_EDNS* const self) {
- return (Py_BuildValue("B", self->edns->getVersion()));
+ return (Py_BuildValue("B", self->cppobj->getVersion()));
}
PyObject*
EDNS_getDNSSECAwareness(const s_EDNS* const self) {
- if (self->edns->getDNSSECAwareness()) {
+ if (self->cppobj->getDNSSECAwareness()) {
Py_RETURN_TRUE;
} else {
Py_RETURN_FALSE;
@@ -287,13 +235,13 @@ EDNS_setDNSSECAwareness(s_EDNS* self, PyObject* args) {
if (!PyArg_ParseTuple(args, "O!", &PyBool_Type, &b)) {
return (NULL);
}
- self->edns->setDNSSECAwareness(b == Py_True);
+ self->cppobj->setDNSSECAwareness(b == Py_True);
Py_RETURN_NONE;
}
PyObject*
EDNS_getUDPSize(const s_EDNS* const self) {
- return (Py_BuildValue("I", self->edns->getUDPSize()));
+ return (Py_BuildValue("I", self->cppobj->getUDPSize()));
}
PyObject*
@@ -310,17 +258,17 @@ EDNS_setUDPSize(s_EDNS* self, PyObject* args) {
"UDP size is not an unsigned 16-bit integer");
return (NULL);
}
- self->edns->setUDPSize(size);
+ self->cppobj->setUDPSize(size);
Py_RETURN_NONE;
}
PyObject*
EDNS_createFromRR(const s_EDNS* null_self, PyObject* args) {
- const s_Name* name;
- const s_RRClass* rrclass;
- const s_RRType* rrtype;
- const s_RRTTL* rrttl;
- const s_Rdata* rdata;
+ const PyObject* name;
+ const PyObject* rrclass;
+ const PyObject* rrtype;
+ const PyObject* rrttl;
+ const PyObject* rdata;
s_EDNS* edns_obj = NULL;
assert(null_self == NULL);
@@ -334,14 +282,17 @@ EDNS_createFromRR(const s_EDNS* null_self, PyObject* args) {
return (NULL);
}
- edns_obj->edns = createFromRR(*name->cppobj, *rrclass->rrclass,
- *rrtype->rrtype, *rrttl->rrttl,
- *rdata->rdata, extended_rcode);
- if (edns_obj->edns != NULL) {
+ edns_obj->cppobj = createFromRR(PyName_ToName(name),
+ PyRRClass_ToRRClass(rrclass),
+ PyRRType_ToRRType(rrtype),
+ PyRRTTL_ToRRTTL(rrttl),
+ PyRdata_ToRdata(rdata),
+ extended_rcode);
+ if (edns_obj->cppobj != NULL) {
PyObject* extrcode_obj = Py_BuildValue("B", extended_rcode);
return (Py_BuildValue("OO", edns_obj, extrcode_obj));
}
-
+
Py_DECREF(edns_obj);
return (NULL);
}
@@ -353,23 +304,90 @@ EDNS_createFromRR(const s_EDNS* null_self, PyObject* args) {
}
} // end of anonymous namespace
-// end of EDNS
-// Module Initialization, all statics are initialized here
+namespace isc {
+namespace dns {
+namespace python {
+
+// This defines the complete type for reflection in python and
+// parsing of PyObject* to s_EDNS
+// Most of the functions are not actually implemented and NULL here.
+PyTypeObject edns_type = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ "pydnspp.EDNS",
+ sizeof(s_EDNS), // tp_basicsize
+ 0, // tp_itemsize
+ (destructor)EDNS_destroy, // tp_dealloc
+ NULL, // tp_print
+ NULL, // tp_getattr
+ NULL, // tp_setattr
+ NULL, // tp_reserved
+ NULL, // tp_repr
+ NULL, // tp_as_number
+ NULL, // tp_as_sequence
+ NULL, // tp_as_mapping
+ NULL, // tp_hash
+ NULL, // tp_call
+ EDNS_str, // tp_str
+ NULL, // tp_getattro
+ NULL, // tp_setattro
+ NULL, // tp_as_buffer
+ Py_TPFLAGS_DEFAULT, // tp_flags
+ "The EDNS class encapsulates DNS extensions "
+ "provided by the EDNSx protocol.",
+ NULL, // tp_traverse
+ NULL, // tp_clear
+ NULL, // tp_richcompare
+ 0, // tp_weaklistoffset
+ NULL, // tp_iter
+ NULL, // tp_iternext
+ EDNS_methods, // tp_methods
+ NULL, // tp_members
+ NULL, // tp_getset
+ NULL, // tp_base
+ NULL, // tp_dict
+ NULL, // tp_descr_get
+ NULL, // tp_descr_set
+ 0, // tp_dictoffset
+ (initproc)EDNS_init, // tp_init
+ NULL, // tp_alloc
+ PyType_GenericNew, // tp_new
+ NULL, // tp_free
+ NULL, // tp_is_gc
+ NULL, // tp_bases
+ NULL, // tp_mro
+ NULL, // tp_cache
+ NULL, // tp_subclasses
+ NULL, // tp_weaklist
+ NULL, // tp_del
+ 0 // tp_version_tag
+};
+
+PyObject*
+createEDNSObject(const EDNS& source) {
+ EDNSContainer container(PyObject_New(s_EDNS, &edns_type));
+ container.set(new EDNS(source));
+ return (container.release());
+}
+
bool
-initModulePart_EDNS(PyObject* mod) {
- // We initialize the static description object with PyType_Ready(),
- // then add it to the module. This is not just a check! (leaving
- // this out results in segmentation faults)
- if (PyType_Ready(&edns_type) < 0) {
- return (false);
+PyEDNS_Check(PyObject* obj) {
+ if (obj == NULL) {
+ isc_throw(PyCPPWrapperException, "obj argument NULL in typecheck");
}
- Py_INCREF(&edns_type);
- void* p = &edns_type;
- PyModule_AddObject(mod, "EDNS", static_cast<PyObject*>(p));
-
- addClassVariable(edns_type, "SUPPORTED_VERSION",
- Py_BuildValue("B", EDNS::SUPPORTED_VERSION));
+ return (PyObject_TypeCheck(obj, &edns_type));
+}
- return (true);
+const EDNS&
+PyEDNS_ToEDNS(const PyObject* edns_obj) {
+ if (edns_obj == NULL) {
+ isc_throw(PyCPPWrapperException,
+ "obj argument NULL in EDNS PyObject conversion");
+ }
+ const s_EDNS* edns = static_cast<const s_EDNS*>(edns_obj);
+ return (*edns->cppobj);
}
+
+} // end namespace python
+} // end namespace dns
+} // end namespace isc
diff --git a/src/lib/dns/python/edns_python.h b/src/lib/dns/python/edns_python.h
new file mode 100644
index 0000000..30d92ab
--- /dev/null
+++ b/src/lib/dns/python/edns_python.h
@@ -0,0 +1,64 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __PYTHON_EDNS_H
+#define __PYTHON_EDNS_H 1
+
+#include <Python.h>
+
+namespace isc {
+namespace dns {
+class EDNS;
+
+namespace python {
+
+extern PyTypeObject edns_type;
+
+/// This is a simple shortcut to create a python EDNS object (in the
+/// form of a pointer to PyObject) with minimal exception safety.
+/// On success, it returns a valid pointer to PyObject with a reference
+/// counter of 1; if something goes wrong it throws an exception (it never
+/// returns a NULL pointer).
+/// This function is expected to be called within a try block
+/// followed by necessary setup for python exception.
+PyObject* createEDNSObject(const EDNS& source);
+
+/// \brief Checks if the given python object is a EDNS object
+///
+/// \exception PyCPPWrapperException if obj is NULL
+///
+/// \param obj The object to check the type of
+/// \return true if the object is of type EDNS, false otherwise
+bool PyEDNS_Check(PyObject* obj);
+
+/// \brief Returns a reference to the EDNS object contained within the given
+/// Python object.
+///
+/// \note The given object MUST be of type EDNS; this can be checked with
+/// either the right call to ParseTuple("O!"), or with PyEDNS_Check()
+///
+/// \note This is not a copy; if the EDNS is needed when the PyObject
+/// may be destroyed, the caller must copy it itself.
+///
+/// \param edns_obj The edns object to convert
+const EDNS& PyEDNS_ToEDNS(const PyObject* edns_obj);
+
+} // namespace python
+} // namespace dns
+} // namespace isc
+#endif // __PYTHON_EDNS_H
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/dns/python/message_python.cc b/src/lib/dns/python/message_python.cc
index 00596f8..2349401 100644
--- a/src/lib/dns/python/message_python.cc
+++ b/src/lib/dns/python/message_python.cc
@@ -12,49 +12,42 @@
// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
// PERFORMANCE OF THIS SOFTWARE.
+#define PY_SSIZE_T_CLEAN
+#include <Python.h>
+
#include <exceptions/exceptions.h>
#include <dns/message.h>
#include <dns/rcode.h>
#include <dns/tsig.h>
-
+#include <dns/exceptions.h>
+#include <dns/messagerenderer.h>
+
+#include "name_python.h"
+#include "question_python.h"
+#include "edns_python.h"
+#include "rcode_python.h"
+#include "opcode_python.h"
+#include "rrset_python.h"
+#include "message_python.h"
+#include "messagerenderer_python.h"
+#include "tsig_python.h"
+#include "tsigrecord_python.h"
+#include "pydnspp_common.h"
+
+using namespace std;
using namespace isc::dns;
+using namespace isc::dns::python;
using namespace isc::util;
-namespace {
-//
-// Declaration of the custom exceptions
-// Initialization and addition of these go in the initModulePart
-// function at the end of this file
-//
-PyObject* po_MessageTooShort;
-PyObject* po_InvalidMessageSection;
-PyObject* po_InvalidMessageOperation;
-PyObject* po_InvalidMessageUDPSize;
-
-//
-// Definition of the classes
-//
+// Import pydoc text
+#include "message_python_inc.cc"
-// For each class, we need a struct, a helper functions (init, destroy,
-// and static wrappers around the methods we export), a list of methods,
-// and a type description
-
-//
-// Message
-//
-
-// The s_* Class simply coverst one instantiation of the object
+namespace {
class s_Message : public PyObject {
public:
- Message* message;
+ isc::dns::Message* cppobj;
};
-//
-// We declare the functions here, the definitions are below
-// the type definition of the object, since both can use the other
-//
-
-// General creation and destruction
int Message_init(s_Message* self, PyObject* args);
void Message_destroy(s_Message* self);
@@ -85,7 +78,7 @@ PyObject* Message_makeResponse(s_Message* self);
PyObject* Message_toText(s_Message* self);
PyObject* Message_str(PyObject* self);
PyObject* Message_toWire(s_Message* self, PyObject* args);
-PyObject* Message_fromWire(s_Message* self, PyObject* args);
+PyObject* Message_fromWire(PyObject* pyself, PyObject* args);
// This list contains the actual set of functions we have in
// python. Each entry has
@@ -167,70 +160,10 @@ PyMethodDef Message_methods[] = {
"If the given message is not in RENDER mode, an "
"InvalidMessageOperation is raised.\n"
},
- { "from_wire", reinterpret_cast<PyCFunction>(Message_fromWire), METH_VARARGS,
- "Parses the given wire format to a Message object.\n"
- "The first argument is a Message to parse the data into.\n"
- "The second argument must implement the buffer interface.\n"
- "If the given message is not in PARSE mode, an "
- "InvalidMessageOperation is raised.\n"
- "Raises MessageTooShort, DNSMessageFORMERR or DNSMessageBADVERS "
- " if there is a problem parsing the message." },
+ { "from_wire", Message_fromWire, METH_VARARGS, Message_fromWire_doc },
{ NULL, NULL, 0, NULL }
};
-// This defines the complete type for reflection in python and
-// parsing of PyObject* to s_Message
-// Most of the functions are not actually implemented and NULL here.
-PyTypeObject message_type = {
- PyVarObject_HEAD_INIT(NULL, 0)
- "pydnspp.Message",
- sizeof(s_Message), // tp_basicsize
- 0, // tp_itemsize
- (destructor)Message_destroy, // tp_dealloc
- NULL, // tp_print
- NULL, // tp_getattr
- NULL, // tp_setattr
- NULL, // tp_reserved
- NULL, // tp_repr
- NULL, // tp_as_number
- NULL, // tp_as_sequence
- NULL, // tp_as_mapping
- NULL, // tp_hash
- NULL, // tp_call
- Message_str, // tp_str
- NULL, // tp_getattro
- NULL, // tp_setattro
- NULL, // tp_as_buffer
- Py_TPFLAGS_DEFAULT, // tp_flags
- "The Message class encapsulates a standard DNS message.",
- NULL, // tp_traverse
- NULL, // tp_clear
- NULL, // tp_richcompare
- 0, // tp_weaklistoffset
- NULL, // tp_iter
- NULL, // tp_iternext
- Message_methods, // tp_methods
- NULL, // tp_members
- NULL, // tp_getset
- NULL, // tp_base
- NULL, // tp_dict
- NULL, // tp_descr_get
- NULL, // tp_descr_set
- 0, // tp_dictoffset
- (initproc)Message_init, // tp_init
- NULL, // tp_alloc
- PyType_GenericNew, // tp_new
- NULL, // tp_free
- NULL, // tp_is_gc
- NULL, // tp_bases
- NULL, // tp_mro
- NULL, // tp_cache
- NULL, // tp_subclasses
- NULL, // tp_weaklist
- NULL, // tp_del
- 0 // tp_version_tag
-};
-
int
Message_init(s_Message* self, PyObject* args) {
int i;
@@ -238,10 +171,10 @@ Message_init(s_Message* self, PyObject* args) {
if (PyArg_ParseTuple(args, "i", &i)) {
PyErr_Clear();
if (i == Message::PARSE) {
- self->message = new Message(Message::PARSE);
+ self->cppobj = new Message(Message::PARSE);
return (0);
} else if (i == Message::RENDER) {
- self->message = new Message(Message::RENDER);
+ self->cppobj = new Message(Message::RENDER);
return (0);
} else {
PyErr_SetString(PyExc_TypeError, "Message mode must be Message.PARSE or Message.RENDER");
@@ -256,8 +189,8 @@ Message_init(s_Message* self, PyObject* args) {
void
Message_destroy(s_Message* self) {
- delete self->message;
- self->message = NULL;
+ delete self->cppobj;
+ self->cppobj = NULL;
Py_TYPE(self)->tp_free(self);
}
@@ -271,7 +204,7 @@ Message_getHeaderFlag(s_Message* self, PyObject* args) {
return (NULL);
}
- if (self->message->getHeaderFlag(
+ if (self->cppobj->getHeaderFlag(
static_cast<Message::HeaderFlag>(messageflag))) {
Py_RETURN_TRUE;
} else {
@@ -296,7 +229,7 @@ Message_setHeaderFlag(s_Message* self, PyObject* args) {
}
try {
- self->message->setHeaderFlag(
+ self->cppobj->setHeaderFlag(
static_cast<Message::HeaderFlag>(messageflag), on == Py_True);
Py_RETURN_NONE;
} catch (const InvalidMessageOperation& imo) {
@@ -312,7 +245,7 @@ Message_setHeaderFlag(s_Message* self, PyObject* args) {
PyObject*
Message_getQid(s_Message* self) {
- return (Py_BuildValue("I", self->message->getQid()));
+ return (Py_BuildValue("I", self->cppobj->getQid()));
}
PyObject*
@@ -331,7 +264,7 @@ Message_setQid(s_Message* self, PyObject* args) {
}
try {
- self->message->setQid(id);
+ self->cppobj->setQid(id);
Py_RETURN_NONE;
} catch (const InvalidMessageOperation& imo) {
PyErr_SetString(po_InvalidMessageOperation, imo.what());
@@ -341,35 +274,25 @@ Message_setQid(s_Message* self, PyObject* args) {
PyObject*
Message_getRcode(s_Message* self) {
- s_Rcode* rcode;
-
- rcode = static_cast<s_Rcode*>(rcode_type.tp_alloc(&rcode_type, 0));
- if (rcode != NULL) {
- rcode->cppobj = NULL;
- try {
- rcode->cppobj = new Rcode(self->message->getRcode());
- } catch (const InvalidMessageOperation& imo) {
- PyErr_SetString(po_InvalidMessageOperation, imo.what());
- } catch (...) {
- PyErr_SetString(po_IscException, "Unexpected exception");
- }
- if (rcode->cppobj == NULL) {
- Py_DECREF(rcode);
- return (NULL);
- }
+ try {
+ return (createRcodeObject(self->cppobj->getRcode()));
+ } catch (const InvalidMessageOperation& imo) {
+ PyErr_SetString(po_InvalidMessageOperation, imo.what());
+ return (NULL);
+ } catch (...) {
+ PyErr_SetString(po_IscException, "Unexpected exception");
+ return (NULL);
}
-
- return (rcode);
}
PyObject*
Message_setRcode(s_Message* self, PyObject* args) {
- s_Rcode* rcode;
+ PyObject* rcode;
if (!PyArg_ParseTuple(args, "O!", &rcode_type, &rcode)) {
return (NULL);
}
try {
- self->message->setRcode(*rcode->cppobj);
+ self->cppobj->setRcode(PyRcode_ToRcode(rcode));
Py_RETURN_NONE;
} catch (const InvalidMessageOperation& imo) {
PyErr_SetString(po_InvalidMessageOperation, imo.what());
@@ -379,35 +302,31 @@ Message_setRcode(s_Message* self, PyObject* args) {
PyObject*
Message_getOpcode(s_Message* self) {
- s_Opcode* opcode;
-
- opcode = static_cast<s_Opcode*>(opcode_type.tp_alloc(&opcode_type, 0));
- if (opcode != NULL) {
- opcode->opcode = NULL;
- try {
- opcode->opcode = new Opcode(self->message->getOpcode());
- } catch (const InvalidMessageOperation& imo) {
- PyErr_SetString(po_InvalidMessageOperation, imo.what());
- } catch (...) {
- PyErr_SetString(po_IscException, "Unexpected exception");
- }
- if (opcode->opcode == NULL) {
- Py_DECREF(opcode);
- return (NULL);
- }
+ try {
+ return (createOpcodeObject(self->cppobj->getOpcode()));
+ } catch (const InvalidMessageOperation& imo) {
+ PyErr_SetString(po_InvalidMessageOperation, imo.what());
+ return (NULL);
+ } catch (const exception& ex) {
+ const string ex_what =
+ "Failed to get message opcode: " + string(ex.what());
+ PyErr_SetString(po_IscException, ex_what.c_str());
+ return (NULL);
+ } catch (...) {
+ PyErr_SetString(po_IscException,
+ "Unexpected exception getting opcode from message");
+ return (NULL);
}
-
- return (opcode);
}
PyObject*
Message_setOpcode(s_Message* self, PyObject* args) {
- s_Opcode* opcode;
+ PyObject* opcode;
if (!PyArg_ParseTuple(args, "O!", &opcode_type, &opcode)) {
return (NULL);
}
try {
- self->message->setOpcode(*opcode->opcode);
+ self->cppobj->setOpcode(PyOpcode_ToOpcode(opcode));
Py_RETURN_NONE;
} catch (const InvalidMessageOperation& imo) {
PyErr_SetString(po_InvalidMessageOperation, imo.what());
@@ -417,32 +336,31 @@ Message_setOpcode(s_Message* self, PyObject* args) {
PyObject*
Message_getEDNS(s_Message* self) {
- s_EDNS* edns;
- EDNS* edns_body;
- ConstEDNSPtr src = self->message->getEDNS();
-
+ ConstEDNSPtr src = self->cppobj->getEDNS();
if (!src) {
Py_RETURN_NONE;
}
- if ((edns_body = new(nothrow) EDNS(*src)) == NULL) {
- return (PyErr_NoMemory());
- }
- edns = static_cast<s_EDNS*>(opcode_type.tp_alloc(&edns_type, 0));
- if (edns != NULL) {
- edns->edns = edns_body;
+ try {
+ return (createEDNSObject(*src));
+ } catch (const exception& ex) {
+ const string ex_what =
+ "Failed to get EDNS from message: " + string(ex.what());
+ PyErr_SetString(po_IscException, ex_what.c_str());
+ } catch (...) {
+ PyErr_SetString(PyExc_SystemError,
+ "Unexpected failure getting EDNS from message");
}
-
- return (edns);
+ return (NULL);
}
PyObject*
Message_setEDNS(s_Message* self, PyObject* args) {
- s_EDNS* edns;
+ PyObject* edns;
if (!PyArg_ParseTuple(args, "O!", &edns_type, &edns)) {
return (NULL);
}
try {
- self->message->setEDNS(EDNSPtr(new EDNS(*edns->edns)));
+ self->cppobj->setEDNS(EDNSPtr(new EDNS(PyEDNS_ToEDNS(edns))));
Py_RETURN_NONE;
} catch (const InvalidMessageOperation& imo) {
PyErr_SetString(po_InvalidMessageOperation, imo.what());
@@ -453,7 +371,7 @@ Message_setEDNS(s_Message* self, PyObject* args) {
PyObject*
Message_getTSIGRecord(s_Message* self) {
try {
- const TSIGRecord* tsig_record = self->message->getTSIGRecord();
+ const TSIGRecord* tsig_record = self->cppobj->getTSIGRecord();
if (tsig_record == NULL) {
Py_RETURN_NONE;
@@ -483,7 +401,7 @@ Message_getRRCount(s_Message* self, PyObject* args) {
return (NULL);
}
try {
- return (Py_BuildValue("I", self->message->getRRCount(
+ return (Py_BuildValue("I", self->cppobj->getRRCount(
static_cast<Message::Section>(section))));
} catch (const isc::OutOfRange& ex) {
PyErr_SetString(PyExc_OverflowError, ex.what());
@@ -496,8 +414,8 @@ PyObject*
Message_getQuestion(s_Message* self) {
QuestionIterator qi, qi_end;
try {
- qi = self->message->beginQuestion();
- qi_end = self->message->endQuestion();
+ qi = self->cppobj->beginQuestion();
+ qi_end = self->cppobj->endQuestion();
} catch (const InvalidMessageSection& ex) {
PyErr_SetString(po_InvalidMessageSection, ex.what());
return (NULL);
@@ -512,23 +430,25 @@ Message_getQuestion(s_Message* self) {
return (NULL);
}
- for (; qi != qi_end; ++qi) {
- s_Question *question = static_cast<s_Question*>(
- question_type.tp_alloc(&question_type, 0));
- if (question == NULL) {
- Py_DECREF(question);
- Py_DECREF(list);
- return (NULL);
- }
- question->question = *qi;
- if (PyList_Append(list, question) == -1) {
- Py_DECREF(question);
- Py_DECREF(list);
- return (NULL);
+ try {
+ for (; qi != qi_end; ++qi) {
+ if (PyList_Append(list, createQuestionObject(**qi)) == -1) {
+ Py_DECREF(list);
+ return (NULL);
+ }
}
- Py_DECREF(question);
+ return (list);
+ } catch (const exception& ex) {
+ const string ex_what =
+ "Unexpected failure getting Question section: " +
+ string(ex.what());
+ PyErr_SetString(po_IscException, ex_what.c_str());
+ } catch (...) {
+ PyErr_SetString(PyExc_SystemError,
+ "Unexpected failure getting Question section");
}
- return (list);
+ Py_DECREF(list);
+ return (NULL);
}
PyObject*
@@ -542,9 +462,9 @@ Message_getSection(s_Message* self, PyObject* args) {
}
RRsetIterator rrsi, rrsi_end;
try {
- rrsi = self->message->beginSection(
+ rrsi = self->cppobj->beginSection(
static_cast<Message::Section>(section));
- rrsi_end = self->message->endSection(
+ rrsi_end = self->cppobj->endSection(
static_cast<Message::Section>(section));
} catch (const isc::OutOfRange& ex) {
PyErr_SetString(PyExc_OverflowError, ex.what());
@@ -562,25 +482,25 @@ Message_getSection(s_Message* self, PyObject* args) {
if (list == NULL) {
return (NULL);
}
- for (; rrsi != rrsi_end; ++rrsi) {
- s_RRset *rrset = static_cast<s_RRset*>(
- rrset_type.tp_alloc(&rrset_type, 0));
- if (rrset == NULL) {
- Py_DECREF(rrset);
- Py_DECREF(list);
- return (NULL);
- }
- rrset->rrset = *rrsi;
- if (PyList_Append(list, rrset) == -1) {
- Py_DECREF(rrset);
- Py_DECREF(list);
- return (NULL);
+ try {
+ for (; rrsi != rrsi_end; ++rrsi) {
+ if (PyList_Append(list, createRRsetObject(**rrsi)) == -1) {
+ Py_DECREF(list);
+ return (NULL);
+ }
}
- // PyList_Append increases refcount, so we remove ours since
- // we don't need it anymore
- Py_DECREF(rrset);
+ return (list);
+ } catch (const exception& ex) {
+ const string ex_what =
+ "Unexpected failure creating Question object: " +
+ string(ex.what());
+ PyErr_SetString(po_IscException, ex_what.c_str());
+ } catch (...) {
+ PyErr_SetString(PyExc_SystemError,
+ "Unexpected failure creating Question object");
}
- return (list);
+ Py_DECREF(list);
+ return (NULL);
}
//static PyObject* Message_beginQuestion(s_Message* self, PyObject* args);
@@ -590,14 +510,14 @@ Message_getSection(s_Message* self, PyObject* args) {
//static PyObject* Message_addQuestion(s_Message* self, PyObject* args);
PyObject*
Message_addQuestion(s_Message* self, PyObject* args) {
- s_Question *question;
+ PyObject* question;
if (!PyArg_ParseTuple(args, "O!", &question_type, &question)) {
return (NULL);
}
- self->message->addQuestion(question->question);
-
+ self->cppobj->addQuestion(PyQuestion_ToQuestion(question));
+
Py_RETURN_NONE;
}
@@ -605,15 +525,15 @@ PyObject*
Message_addRRset(s_Message* self, PyObject* args) {
PyObject *sign = Py_False;
int section;
- s_RRset* rrset;
+ PyObject* rrset;
    if (!PyArg_ParseTuple(args, "iO!|O!", &section, &rrset_type, &rrset,
&PyBool_Type, &sign)) {
return (NULL);
}
try {
- self->message->addRRset(static_cast<Message::Section>(section),
- rrset->rrset, sign == Py_True);
+ self->cppobj->addRRset(static_cast<Message::Section>(section),
+ PyRRset_ToRRsetPtr(rrset), sign == Py_True);
Py_RETURN_NONE;
} catch (const InvalidMessageOperation& imo) {
PyErr_SetString(po_InvalidMessageOperation, imo.what());
@@ -634,10 +554,10 @@ Message_clear(s_Message* self, PyObject* args) {
if (PyArg_ParseTuple(args, "i", &i)) {
PyErr_Clear();
if (i == Message::PARSE) {
- self->message->clear(Message::PARSE);
+ self->cppobj->clear(Message::PARSE);
Py_RETURN_NONE;
} else if (i == Message::RENDER) {
- self->message->clear(Message::RENDER);
+ self->cppobj->clear(Message::RENDER);
Py_RETURN_NONE;
} else {
PyErr_SetString(PyExc_TypeError,
@@ -651,7 +571,7 @@ Message_clear(s_Message* self, PyObject* args) {
PyObject*
Message_makeResponse(s_Message* self) {
- self->message->makeResponse();
+ self->cppobj->makeResponse();
Py_RETURN_NONE;
}
@@ -659,7 +579,7 @@ PyObject*
Message_toText(s_Message* self) {
// Py_BuildValue makes python objects from native data
try {
- return (Py_BuildValue("s", self->message->toText().c_str()));
+ return (Py_BuildValue("s", self->cppobj->toText().c_str()));
} catch (const InvalidMessageOperation& imo) {
PyErr_Clear();
PyErr_SetString(po_InvalidMessageOperation, imo.what());
@@ -680,16 +600,17 @@ Message_str(PyObject* self) {
PyObject*
Message_toWire(s_Message* self, PyObject* args) {
- s_MessageRenderer* mr;
- s_TSIGContext* tsig_ctx = NULL;
-
+ PyObject* mr;
+ PyObject* tsig_ctx = NULL;
+
if (PyArg_ParseTuple(args, "O!|O!", &messagerenderer_type, &mr,
&tsigcontext_type, &tsig_ctx)) {
try {
if (tsig_ctx == NULL) {
- self->message->toWire(*mr->messagerenderer);
+ self->cppobj->toWire(PyMessageRenderer_ToMessageRenderer(mr));
} else {
- self->message->toWire(*mr->messagerenderer, *tsig_ctx->cppobj);
+ self->cppobj->toWire(PyMessageRenderer_ToMessageRenderer(mr),
+ PyTSIGContext_ToTSIGContext(tsig_ctx));
}
// If we return NULL it is seen as an error, so use this for
// None returns
@@ -721,97 +642,125 @@ Message_toWire(s_Message* self, PyObject* args) {
}
PyObject*
-Message_fromWire(s_Message* self, PyObject* args) {
+Message_fromWire(PyObject* pyself, PyObject* args) {
+ s_Message* const self = static_cast<s_Message*>(pyself);
const char* b;
Py_ssize_t len;
- if (!PyArg_ParseTuple(args, "y#", &b, &len)) {
- return (NULL);
- }
-
- InputBuffer inbuf(b, len);
- try {
- self->message->fromWire(inbuf);
- Py_RETURN_NONE;
- } catch (const InvalidMessageOperation& imo) {
- PyErr_SetString(po_InvalidMessageOperation, imo.what());
- return (NULL);
- } catch (const DNSMessageFORMERR& dmfe) {
- PyErr_SetString(po_DNSMessageFORMERR, dmfe.what());
- return (NULL);
- } catch (const DNSMessageBADVERS& dmfe) {
- PyErr_SetString(po_DNSMessageBADVERS, dmfe.what());
- return (NULL);
- } catch (const MessageTooShort& mts) {
- PyErr_SetString(po_MessageTooShort, mts.what());
- return (NULL);
+ unsigned int options = Message::PARSE_DEFAULT;
+
+ if (PyArg_ParseTuple(args, "y#", &b, &len) ||
+ PyArg_ParseTuple(args, "y#I", &b, &len, &options)) {
+ // We need to clear the error in case the first call to ParseTuple
+ // fails.
+ PyErr_Clear();
+
+ InputBuffer inbuf(b, len);
+ try {
+ self->cppobj->fromWire(
+ inbuf, static_cast<Message::ParseOptions>(options));
+ Py_RETURN_NONE;
+ } catch (const InvalidMessageOperation& imo) {
+ PyErr_SetString(po_InvalidMessageOperation, imo.what());
+ return (NULL);
+ } catch (const DNSMessageFORMERR& dmfe) {
+ PyErr_SetString(po_DNSMessageFORMERR, dmfe.what());
+ return (NULL);
+ } catch (const DNSMessageBADVERS& dmfe) {
+ PyErr_SetString(po_DNSMessageBADVERS, dmfe.what());
+ return (NULL);
+ } catch (const MessageTooShort& mts) {
+ PyErr_SetString(po_MessageTooShort, mts.what());
+ return (NULL);
+ } catch (const InvalidBufferPosition& ex) {
+ PyErr_SetString(po_DNSMessageFORMERR, ex.what());
+ return (NULL);
+ } catch (const exception& ex) {
+ const string ex_what =
+ "Error in Message.from_wire: " + string(ex.what());
+ PyErr_SetString(PyExc_RuntimeError, ex_what.c_str());
+ return (NULL);
+ } catch (...) {
+ PyErr_SetString(PyExc_RuntimeError,
+ "Unexpected exception in Message.from_wire");
+ return (NULL);
+ }
}
-}
-// Module Initialization, all statics are initialized here
-bool
-initModulePart_Message(PyObject* mod) {
- if (PyType_Ready(&message_type) < 0) {
- return (false);
- }
- Py_INCREF(&message_type);
-
- // Class variables
- // These are added to the tp_dict of the type object
- //
- addClassVariable(message_type, "PARSE",
- Py_BuildValue("I", Message::PARSE));
- addClassVariable(message_type, "RENDER",
- Py_BuildValue("I", Message::RENDER));
-
- addClassVariable(message_type, "HEADERFLAG_QR",
- Py_BuildValue("I", Message::HEADERFLAG_QR));
- addClassVariable(message_type, "HEADERFLAG_AA",
- Py_BuildValue("I", Message::HEADERFLAG_AA));
- addClassVariable(message_type, "HEADERFLAG_TC",
- Py_BuildValue("I", Message::HEADERFLAG_TC));
- addClassVariable(message_type, "HEADERFLAG_RD",
- Py_BuildValue("I", Message::HEADERFLAG_RD));
- addClassVariable(message_type, "HEADERFLAG_RA",
- Py_BuildValue("I", Message::HEADERFLAG_RA));
- addClassVariable(message_type, "HEADERFLAG_AD",
- Py_BuildValue("I", Message::HEADERFLAG_AD));
- addClassVariable(message_type, "HEADERFLAG_CD",
- Py_BuildValue("I", Message::HEADERFLAG_CD));
-
- addClassVariable(message_type, "SECTION_QUESTION",
- Py_BuildValue("I", Message::SECTION_QUESTION));
- addClassVariable(message_type, "SECTION_ANSWER",
- Py_BuildValue("I", Message::SECTION_ANSWER));
- addClassVariable(message_type, "SECTION_AUTHORITY",
- Py_BuildValue("I", Message::SECTION_AUTHORITY));
- addClassVariable(message_type, "SECTION_ADDITIONAL",
- Py_BuildValue("I", Message::SECTION_ADDITIONAL));
-
- addClassVariable(message_type, "DEFAULT_MAX_UDPSIZE",
- Py_BuildValue("I", Message::DEFAULT_MAX_UDPSIZE));
-
- /* Class-specific exceptions */
- po_MessageTooShort = PyErr_NewException("pydnspp.MessageTooShort", NULL,
- NULL);
- PyModule_AddObject(mod, "MessageTooShort", po_MessageTooShort);
- po_InvalidMessageSection =
- PyErr_NewException("pydnspp.InvalidMessageSection", NULL, NULL);
- PyModule_AddObject(mod, "InvalidMessageSection", po_InvalidMessageSection);
- po_InvalidMessageOperation =
- PyErr_NewException("pydnspp.InvalidMessageOperation", NULL, NULL);
- PyModule_AddObject(mod, "InvalidMessageOperation",
- po_InvalidMessageOperation);
- po_InvalidMessageUDPSize =
- PyErr_NewException("pydnspp.InvalidMessageUDPSize", NULL, NULL);
- PyModule_AddObject(mod, "InvalidMessageUDPSize", po_InvalidMessageUDPSize);
- po_DNSMessageBADVERS = PyErr_NewException("pydnspp.DNSMessageBADVERS",
- NULL, NULL);
- PyModule_AddObject(mod, "DNSMessageBADVERS", po_DNSMessageBADVERS);
-
- PyModule_AddObject(mod, "Message",
- reinterpret_cast<PyObject*>(&message_type));
-
-
- return (true);
+ PyErr_SetString(PyExc_TypeError,
+ "from_wire() arguments must be a byte object and "
+ "(optional) parse options");
+ return (NULL);
}
+
} // end of unnamed namespace
+
+namespace isc {
+namespace dns {
+namespace python {
+
+//
+// Declaration of the custom exceptions
+// Initialization and addition of these go in the initModulePart
+// function in pydnspp.cc
+//
+PyObject* po_MessageTooShort;
+PyObject* po_InvalidMessageSection;
+PyObject* po_InvalidMessageOperation;
+PyObject* po_InvalidMessageUDPSize;
+
+// This defines the complete type for reflection in python and
+// parsing of PyObject* to s_Message
+// Most of the functions are not actually implemented and NULL here.
+PyTypeObject message_type = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ "pydnspp.Message",
+ sizeof(s_Message), // tp_basicsize
+ 0, // tp_itemsize
+ (destructor)Message_destroy, // tp_dealloc
+ NULL, // tp_print
+ NULL, // tp_getattr
+ NULL, // tp_setattr
+ NULL, // tp_reserved
+ NULL, // tp_repr
+ NULL, // tp_as_number
+ NULL, // tp_as_sequence
+ NULL, // tp_as_mapping
+ NULL, // tp_hash
+ NULL, // tp_call
+ Message_str, // tp_str
+ NULL, // tp_getattro
+ NULL, // tp_setattro
+ NULL, // tp_as_buffer
+ Py_TPFLAGS_DEFAULT, // tp_flags
+ "The Message class encapsulates a standard DNS message.",
+ NULL, // tp_traverse
+ NULL, // tp_clear
+ NULL, // tp_richcompare
+ 0, // tp_weaklistoffset
+ NULL, // tp_iter
+ NULL, // tp_iternext
+ Message_methods, // tp_methods
+ NULL, // tp_members
+ NULL, // tp_getset
+ NULL, // tp_base
+ NULL, // tp_dict
+ NULL, // tp_descr_get
+ NULL, // tp_descr_set
+ 0, // tp_dictoffset
+ (initproc)Message_init, // tp_init
+ NULL, // tp_alloc
+ PyType_GenericNew, // tp_new
+ NULL, // tp_free
+ NULL, // tp_is_gc
+ NULL, // tp_bases
+ NULL, // tp_mro
+ NULL, // tp_cache
+ NULL, // tp_subclasses
+ NULL, // tp_weaklist
+ NULL, // tp_del
+ 0 // tp_version_tag
+};
+
+} // end python namespace
+} // end dns namespace
+} // end isc namespace
diff --git a/src/lib/dns/python/message_python.h b/src/lib/dns/python/message_python.h
new file mode 100644
index 0000000..be23890
--- /dev/null
+++ b/src/lib/dns/python/message_python.h
@@ -0,0 +1,40 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __PYTHON_MESSAGE_H
+#define __PYTHON_MESSAGE_H 1
+
+#include <Python.h>
+
+namespace isc {
+namespace dns {
+class Message;
+
+namespace python {
+
+extern PyObject* po_MessageTooShort;
+extern PyObject* po_InvalidMessageSection;
+extern PyObject* po_InvalidMessageOperation;
+extern PyObject* po_InvalidMessageUDPSize;
+
+extern PyTypeObject message_type;
+
+} // namespace python
+} // namespace dns
+} // namespace isc
+#endif // __PYTHON_MESSAGE_H
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/dns/python/message_python_inc.cc b/src/lib/dns/python/message_python_inc.cc
new file mode 100644
index 0000000..561c494
--- /dev/null
+++ b/src/lib/dns/python/message_python_inc.cc
@@ -0,0 +1,41 @@
+namespace {
+const char* const Message_fromWire_doc = "\
+from_wire(data, options=PARSE_DEFAULT)\n\
+\n\
+(Re)build a Message object from wire-format data.\n\
+\n\
+This method parses the given wire format data to build a complete\n\
+Message object. On success, the values of the header section fields\n\
+can be accessible via corresponding get methods, and the question and\n\
+following sections can be accessible via the corresponding iterators.\n\
+If the message contains an EDNS or TSIG, they can be accessible via\n\
+get_edns() and get_tsig_record(), respectively.\n\
+\n\
+This Message must be in the PARSE mode.\n\
+\n\
+This method performs strict validation on the given message based on\n\
+the DNS protocol specifications. If the given message data is invalid,\n\
+this method throws an exception (see the exception list).\n\
+\n\
+By default, this method combines RRs of the same name, RR type and RR\n\
+class in a section into a single RRset, even if they are interleaved\n\
+with a different type of RR (though it would be a rare case in\n\
+practice). If the PRESERVE_ORDER option is specified, it handles each\n\
+RR separately, in the appearing order, and converts it to a separate\n\
+RRset (so this RRset should contain exactly one Rdata). This mode will\n\
+be necessary when the higher level protocol is ordering conscious. For\n\
+example, in AXFR and IXFR, the position of the SOA RRs are crucial.\n\
+\n\
+Exceptions:\n\
+ InvalidMessageOperation Message is in the RENDER mode\n\
+ DNSMessageFORMERR The given message data is syntactically\n\
+ MessageTooShort The given data is shorter than a valid header\n\
+ section\n\
+ Others Name, Rdata, and EDNS classes can also throw\n\
+\n\
+Parameters:\n\
+ data A byte object of the wire data\n\
+ options Parse options\n\
+\n\
+";
+} // unnamed namespace
diff --git a/src/lib/dns/python/messagerenderer_python.cc b/src/lib/dns/python/messagerenderer_python.cc
index e6f5d3e..bb89622 100644
--- a/src/lib/dns/python/messagerenderer_python.cc
+++ b/src/lib/dns/python/messagerenderer_python.cc
@@ -17,6 +17,7 @@
#include <util/buffer.h>
#include <dns/messagerenderer.h>
+#include <util/python/pycppwrapper_util.h>
#include "pydnspp_common.h"
#include "messagerenderer_python.h"
@@ -24,15 +25,21 @@
using namespace isc::dns;
using namespace isc::dns::python;
using namespace isc::util;
-
-// MessageRenderer
-
-s_MessageRenderer::s_MessageRenderer() : outputbuffer(NULL),
- messagerenderer(NULL)
-{
-}
+using namespace isc::util::python;
namespace {
+// The s_* Class simply covers one instantiation of the object.
+//
+// since we don't use *Buffer in the python version (but work with
+// the already existing bytearray type where we use these custom buffers
+// in C++, we need to keep track of one here.
+class s_MessageRenderer : public PyObject {
+public:
+ s_MessageRenderer();
+ isc::util::OutputBuffer* outputbuffer;
+ MessageRenderer* cppobj;
+};
+
int MessageRenderer_init(s_MessageRenderer* self);
void MessageRenderer_destroy(s_MessageRenderer* self);
@@ -72,15 +79,15 @@ PyMethodDef MessageRenderer_methods[] = {
int
MessageRenderer_init(s_MessageRenderer* self) {
self->outputbuffer = new OutputBuffer(4096);
- self->messagerenderer = new MessageRenderer(*self->outputbuffer);
+ self->cppobj = new MessageRenderer(*self->outputbuffer);
return (0);
}
void
MessageRenderer_destroy(s_MessageRenderer* self) {
- delete self->messagerenderer;
+ delete self->cppobj;
delete self->outputbuffer;
- self->messagerenderer = NULL;
+ self->cppobj = NULL;
self->outputbuffer = NULL;
Py_TYPE(self)->tp_free(self);
}
@@ -88,18 +95,18 @@ MessageRenderer_destroy(s_MessageRenderer* self) {
PyObject*
MessageRenderer_getData(s_MessageRenderer* self) {
return (Py_BuildValue("y#",
- self->messagerenderer->getData(),
- self->messagerenderer->getLength()));
+ self->cppobj->getData(),
+ self->cppobj->getLength()));
}
PyObject*
MessageRenderer_getLength(s_MessageRenderer* self) {
- return (Py_BuildValue("I", self->messagerenderer->getLength()));
+ return (Py_BuildValue("I", self->cppobj->getLength()));
}
PyObject*
MessageRenderer_isTruncated(s_MessageRenderer* self) {
- if (self->messagerenderer->isTruncated()) {
+ if (self->cppobj->isTruncated()) {
Py_RETURN_TRUE;
} else {
Py_RETURN_FALSE;
@@ -108,17 +115,17 @@ MessageRenderer_isTruncated(s_MessageRenderer* self) {
PyObject*
MessageRenderer_getLengthLimit(s_MessageRenderer* self) {
- return (Py_BuildValue("I", self->messagerenderer->getLengthLimit()));
+ return (Py_BuildValue("I", self->cppobj->getLengthLimit()));
}
PyObject*
MessageRenderer_getCompressMode(s_MessageRenderer* self) {
- return (Py_BuildValue("I", self->messagerenderer->getCompressMode()));
+ return (Py_BuildValue("I", self->cppobj->getCompressMode()));
}
PyObject*
MessageRenderer_setTruncated(s_MessageRenderer* self) {
- self->messagerenderer->setTruncated();
+ self->cppobj->setTruncated();
Py_RETURN_NONE;
}
@@ -138,7 +145,7 @@ MessageRenderer_setLengthLimit(s_MessageRenderer* self,
"MessageRenderer length limit out of range");
return (NULL);
}
- self->messagerenderer->setLengthLimit(lengthlimit);
+ self->cppobj->setLengthLimit(lengthlimit);
Py_RETURN_NONE;
}
@@ -152,12 +159,12 @@ MessageRenderer_setCompressMode(s_MessageRenderer* self,
}
if (mode == MessageRenderer::CASE_INSENSITIVE) {
- self->messagerenderer->setCompressMode(MessageRenderer::CASE_INSENSITIVE);
+ self->cppobj->setCompressMode(MessageRenderer::CASE_INSENSITIVE);
// If we return NULL it is seen as an error, so use this for
// None returns, it also applies to CASE_SENSITIVE.
Py_RETURN_NONE;
} else if (mode == MessageRenderer::CASE_SENSITIVE) {
- self->messagerenderer->setCompressMode(MessageRenderer::CASE_SENSITIVE);
+ self->cppobj->setCompressMode(MessageRenderer::CASE_SENSITIVE);
Py_RETURN_NONE;
} else {
PyErr_SetString(PyExc_TypeError,
@@ -169,12 +176,11 @@ MessageRenderer_setCompressMode(s_MessageRenderer* self,
PyObject*
MessageRenderer_clear(s_MessageRenderer* self) {
- self->messagerenderer->clear();
+ self->cppobj->clear();
Py_RETURN_NONE;
}
} // end of unnamed namespace
-// end of MessageRenderer
namespace isc {
namespace dns {
namespace python {
@@ -233,37 +239,29 @@ PyTypeObject messagerenderer_type = {
0 // tp_version_tag
};
-// Module Initialization, all statics are initialized here
-bool
-initModulePart_MessageRenderer(PyObject* mod) {
- // Add the exceptions to the module
+// If we need a createMessageRendererObject(), should we copy? can we?
+// copy the existing buffer into a new one, then create a new renderer with
+// that buffer?
- // Add the enums to the module
-
- // Add the constants to the module
-
- // Add the classes to the module
- // We initialize the static description object with PyType_Ready(),
- // then add it to the module
+bool
+PyMessageRenderer_Check(PyObject* obj) {
+ if (obj == NULL) {
+ isc_throw(PyCPPWrapperException, "obj argument NULL in typecheck");
+ }
+ return (PyObject_TypeCheck(obj, &messagerenderer_type));
+}
- // NameComparisonResult
- if (PyType_Ready(&messagerenderer_type) < 0) {
- return (false);
+MessageRenderer&
+PyMessageRenderer_ToMessageRenderer(PyObject* messagerenderer_obj) {
+ if (messagerenderer_obj == NULL) {
+ isc_throw(PyCPPWrapperException,
+ "obj argument NULL in MessageRenderer PyObject conversion");
}
- Py_INCREF(&messagerenderer_type);
+ s_MessageRenderer* messagerenderer = static_cast<s_MessageRenderer*>(messagerenderer_obj);
+ return (*messagerenderer->cppobj);
+}
- // Class variables
- // These are added to the tp_dict of the type object
- addClassVariable(messagerenderer_type, "CASE_INSENSITIVE",
- Py_BuildValue("I", MessageRenderer::CASE_INSENSITIVE));
- addClassVariable(messagerenderer_type, "CASE_SENSITIVE",
- Py_BuildValue("I", MessageRenderer::CASE_SENSITIVE));
- PyModule_AddObject(mod, "MessageRenderer",
- reinterpret_cast<PyObject*>(&messagerenderer_type));
-
- return (true);
-}
} // namespace python
} // namespace dns
} // namespace isc
diff --git a/src/lib/dns/python/messagerenderer_python.h b/src/lib/dns/python/messagerenderer_python.h
index 3bb096e..ea9a940 100644
--- a/src/lib/dns/python/messagerenderer_python.h
+++ b/src/lib/dns/python/messagerenderer_python.h
@@ -17,30 +17,35 @@
#include <Python.h>
+#include <util/buffer.h>
+
namespace isc {
-namespace util {
-class OutputBuffer;
-}
namespace dns {
class MessageRenderer;
namespace python {
-// The s_* Class simply covers one instantiation of the object.
-//
-// since we don't use *Buffer in the python version (but work with
-// the already existing bytearray type where we use these custom buffers
-// in C++, we need to keep track of one here.
-class s_MessageRenderer : public PyObject {
-public:
- s_MessageRenderer();
- isc::util::OutputBuffer* outputbuffer;
- MessageRenderer* messagerenderer;
-};
-
extern PyTypeObject messagerenderer_type;
-bool initModulePart_MessageRenderer(PyObject* mod);
+/// \brief Checks if the given python object is a MessageRenderer object
+///
+/// \exception PyCPPWrapperException if obj is NULL
+///
+/// \param obj The object to check the type of
+/// \return true if the object is of type MessageRenderer, false otherwise
+bool PyMessageRenderer_Check(PyObject* obj);
+
+/// \brief Returns a reference to the MessageRenderer object contained within the given
+/// Python object.
+///
+/// \note The given object MUST be of type MessageRenderer; this can be checked with
+/// either the right call to ParseTuple("O!"), or with PyMessageRenderer_Check()
+///
+/// \note This is not a copy; if the MessageRenderer is needed when the PyObject
+/// may be destroyed, the caller must copy it itself.
+///
+/// \param messagerenderer_obj The messagerenderer object to convert
+MessageRenderer& PyMessageRenderer_ToMessageRenderer(PyObject* messagerenderer_obj);
} // namespace python
} // namespace dns
diff --git a/src/lib/dns/python/name_python.cc b/src/lib/dns/python/name_python.cc
index d00c6f7..4043445 100644
--- a/src/lib/dns/python/name_python.cc
+++ b/src/lib/dns/python/name_python.cc
@@ -25,20 +25,25 @@
#include "messagerenderer_python.h"
#include "name_python.h"
-//
-// Definition of the classes
-//
-
-// For each class, we need a struct, a helper functions (init, destroy,
-// and static wrappers around the methods we export), a list of methods,
-// and a type description
using namespace isc::dns;
using namespace isc::dns::python;
using namespace isc::util;
using namespace isc::util::python;
namespace {
-// NameComparisonResult
+// The s_* Class simply covers one instantiation of the object.
+class s_NameComparisonResult : public PyObject {
+public:
+ s_NameComparisonResult() : cppobj(NULL) {}
+ NameComparisonResult* cppobj;
+};
+
+class s_Name : public PyObject {
+public:
+ s_Name() : cppobj(NULL), position(0) {}
+ Name* cppobj;
+ size_t position;
+};
int NameComparisonResult_init(s_NameComparisonResult*, PyObject*);
void NameComparisonResult_destroy(s_NameComparisonResult* self);
@@ -84,9 +89,7 @@ PyObject*
NameComparisonResult_getRelation(s_NameComparisonResult* self) {
return (Py_BuildValue("I", self->cppobj->getRelation()));
}
-// end of NameComparisonResult
-// Name
// Shortcut type which would be convenient for adding class variables safely.
typedef CPPPyObjectContainer<s_Name, Name> NameContainer;
@@ -292,7 +295,7 @@ Name_str(PyObject* self) {
PyObject*
Name_toWire(s_Name* self, PyObject* args) {
PyObject* bytes;
- s_MessageRenderer* mr;
+ PyObject* mr;
if (PyArg_ParseTuple(args, "O", &bytes) && PySequence_Check(bytes)) {
PyObject* bytes_o = bytes;
@@ -306,7 +309,7 @@ Name_toWire(s_Name* self, PyObject* args) {
Py_DECREF(name_bytes);
return (result);
} else if (PyArg_ParseTuple(args, "O!", &messagerenderer_type, &mr)) {
- self->cppobj->toWire(*mr->messagerenderer);
+ self->cppobj->toWire(PyMessageRenderer_ToMessageRenderer(mr));
// If we return NULL it is seen as an error, so use this for
// None returns
Py_RETURN_NONE;
@@ -495,7 +498,7 @@ Name_isWildCard(s_Name* self) {
Py_RETURN_FALSE;
}
}
-// end of Name
+
} // end of unnamed namespace
namespace isc {
@@ -634,94 +637,32 @@ PyTypeObject name_type = {
0 // tp_version_tag
};
-// Module Initialization, all statics are initialized here
-bool
-initModulePart_Name(PyObject* mod) {
- // Add the classes to the module
- // We initialize the static description object with PyType_Ready(),
- // then add it to the module
-
- //
- // NameComparisonResult
- //
- if (PyType_Ready(&name_comparison_result_type) < 0) {
- return (false);
- }
- Py_INCREF(&name_comparison_result_type);
-
- // Add the enums to the module
- po_NameRelation = Py_BuildValue("{i:s,i:s,i:s,i:s}",
- NameComparisonResult::SUPERDOMAIN, "SUPERDOMAIN",
- NameComparisonResult::SUBDOMAIN, "SUBDOMAIN",
- NameComparisonResult::EQUAL, "EQUAL",
- NameComparisonResult::COMMONANCESTOR, "COMMONANCESTOR");
- addClassVariable(name_comparison_result_type, "NameRelation", po_NameRelation);
-
- PyModule_AddObject(mod, "NameComparisonResult",
- reinterpret_cast<PyObject*>(&name_comparison_result_type));
-
- //
- // Name
- //
-
- if (PyType_Ready(&name_type) < 0) {
- return (false);
- }
- Py_INCREF(&name_type);
-
- // Add the constants to the module
- addClassVariable(name_type, "MAX_WIRE", Py_BuildValue("I", Name::MAX_WIRE));
- addClassVariable(name_type, "MAX_LABELS", Py_BuildValue("I", Name::MAX_LABELS));
- addClassVariable(name_type, "MAX_LABELLEN", Py_BuildValue("I", Name::MAX_LABELLEN));
- addClassVariable(name_type, "MAX_COMPRESS_POINTER", Py_BuildValue("I", Name::MAX_COMPRESS_POINTER));
- addClassVariable(name_type, "COMPRESS_POINTER_MARK8", Py_BuildValue("I", Name::COMPRESS_POINTER_MARK8));
- addClassVariable(name_type, "COMPRESS_POINTER_MARK16", Py_BuildValue("I", Name::COMPRESS_POINTER_MARK16));
-
- s_Name* root_name = PyObject_New(s_Name, &name_type);
- root_name->cppobj = new Name(Name::ROOT_NAME());
- PyObject* po_ROOT_NAME = root_name;
- addClassVariable(name_type, "ROOT_NAME", po_ROOT_NAME);
-
- PyModule_AddObject(mod, "Name",
- reinterpret_cast<PyObject*>(&name_type));
-
-
- // Add the exceptions to the module
- po_EmptyLabel = PyErr_NewException("pydnspp.EmptyLabel", NULL, NULL);
- PyModule_AddObject(mod, "EmptyLabel", po_EmptyLabel);
-
- po_TooLongName = PyErr_NewException("pydnspp.TooLongName", NULL, NULL);
- PyModule_AddObject(mod, "TooLongName", po_TooLongName);
-
- po_TooLongLabel = PyErr_NewException("pydnspp.TooLongLabel", NULL, NULL);
- PyModule_AddObject(mod, "TooLongLabel", po_TooLongLabel);
-
- po_BadLabelType = PyErr_NewException("pydnspp.BadLabelType", NULL, NULL);
- PyModule_AddObject(mod, "BadLabelType", po_BadLabelType);
-
- po_BadEscape = PyErr_NewException("pydnspp.BadEscape", NULL, NULL);
- PyModule_AddObject(mod, "BadEscape", po_BadEscape);
-
- po_IncompleteName = PyErr_NewException("pydnspp.IncompleteName", NULL, NULL);
- PyModule_AddObject(mod, "IncompleteName", po_IncompleteName);
-
- po_InvalidBufferPosition = PyErr_NewException("pydnspp.InvalidBufferPosition", NULL, NULL);
- PyModule_AddObject(mod, "InvalidBufferPosition", po_InvalidBufferPosition);
-
- // This one could have gone into the message_python.cc file, but is
- // already needed here.
- po_DNSMessageFORMERR = PyErr_NewException("pydnspp.DNSMessageFORMERR", NULL, NULL);
- PyModule_AddObject(mod, "DNSMessageFORMERR", po_DNSMessageFORMERR);
-
- return (true);
-}
-
PyObject*
createNameObject(const Name& source) {
- NameContainer container = PyObject_New(s_Name, &name_type);
+ NameContainer container(PyObject_New(s_Name, &name_type));
container.set(new Name(source));
return (container.release());
}
+
+bool
+PyName_Check(PyObject* obj) {
+ if (obj == NULL) {
+ isc_throw(PyCPPWrapperException, "obj argument NULL in typecheck");
+ }
+ return (PyObject_TypeCheck(obj, &name_type));
+}
+
+const Name&
+PyName_ToName(const PyObject* name_obj) {
+ if (name_obj == NULL) {
+ isc_throw(PyCPPWrapperException,
+ "obj argument NULL in Name PyObject conversion");
+ }
+ const s_Name* name = static_cast<const s_Name*>(name_obj);
+ return (*name->cppobj);
+}
+
+
} // namespace python
} // namespace dns
} // namespace isc
diff --git a/src/lib/dns/python/name_python.h b/src/lib/dns/python/name_python.h
index f8e793d..86d7fd0 100644
--- a/src/lib/dns/python/name_python.h
+++ b/src/lib/dns/python/name_python.h
@@ -17,20 +17,12 @@
#include <Python.h>
-#include <util/python/pycppwrapper_util.h>
-
namespace isc {
namespace dns {
-class NameComparisonResult;
class Name;
namespace python {
-//
-// Declaration of the custom exceptions
-// Initialization and addition of these go in the module init at the
-// end
-//
extern PyObject* po_EmptyLabel;
extern PyObject* po_TooLongName;
extern PyObject* po_TooLongLabel;
@@ -47,25 +39,9 @@ extern PyObject* po_DNSMessageFORMERR;
//
extern PyObject* po_NameRelation;
-// The s_* Class simply covers one instantiation of the object.
-class s_NameComparisonResult : public PyObject {
-public:
- s_NameComparisonResult() : cppobj(NULL) {}
- NameComparisonResult* cppobj;
-};
-
-class s_Name : public PyObject {
-public:
- s_Name() : cppobj(NULL), position(0) {}
- Name* cppobj;
- size_t position;
-};
-
extern PyTypeObject name_comparison_result_type;
extern PyTypeObject name_type;
-bool initModulePart_Name(PyObject* mod);
-
/// This is A simple shortcut to create a python Name object (in the
/// form of a pointer to PyObject) with minimal exception safety.
/// On success, it returns a valid pointer to PyObject with a reference
@@ -74,6 +50,27 @@ bool initModulePart_Name(PyObject* mod);
/// This function is expected to be called with in a try block
/// followed by necessary setup for python exception.
PyObject* createNameObject(const Name& source);
+
+/// \brief Checks if the given python object is a Name object
+///
+/// \exception PyCPPWrapperException if obj is NULL
+///
+/// \param obj The object to check the type of
+/// \return true if the object is of type Name, false otherwise
+bool PyName_Check(PyObject* obj);
+
+/// \brief Returns a reference to the Name object contained within the given
+/// Python object.
+///
+/// \note The given object MUST be of type Name; this can be checked with
+/// either the right call to ParseTuple("O!"), or with PyName_Check()
+///
+/// \note This is not a copy; if the Name is needed when the PyObject
+/// may be destroyed, the caller must copy it itself.
+///
+/// \param name_obj The name object to convert
+const Name& PyName_ToName(const PyObject* name_obj);
+
} // namespace python
} // namespace dns
} // namespace isc
diff --git a/src/lib/dns/python/opcode_python.cc b/src/lib/dns/python/opcode_python.cc
index 0e2a30b..50436a9 100644
--- a/src/lib/dns/python/opcode_python.cc
+++ b/src/lib/dns/python/opcode_python.cc
@@ -12,32 +12,31 @@
// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
// PERFORMANCE OF THIS SOFTWARE.
-#include <dns/opcode.h>
-
-using namespace isc::dns;
+#include <Python.h>
-//
-// Declaration of the custom exceptions (None for this class)
+#include <dns/opcode.h>
+#include <util/python/pycppwrapper_util.h>
-//
-// Definition of the classes
-//
+#include "pydnspp_common.h"
+#include "opcode_python.h"
+#include "edns_python.h"
-// For each class, we need a struct, a helper functions (init, destroy,
-// and static wrappers around the methods we export), a list of methods,
-// and a type description
+using namespace isc::dns;
+using namespace isc::dns::python;
+using namespace isc::util;
+using namespace isc::util::python;
namespace {
-//
-// Opcode
-//
+
class s_Opcode : public PyObject {
public:
- s_Opcode() : opcode(NULL), static_code(false) {}
- const Opcode* opcode;
+ s_Opcode() : cppobj(NULL), static_code(false) {}
+ const isc::dns::Opcode* cppobj;
bool static_code;
};
+typedef CPPPyObjectContainer<s_Opcode, Opcode> OpcodeContainer;
+
int Opcode_init(s_Opcode* const self, PyObject* args);
void Opcode_destroy(s_Opcode* const self);
@@ -103,64 +102,13 @@ PyMethodDef Opcode_methods[] = {
{ NULL, NULL, 0, NULL }
};
-PyTypeObject opcode_type = {
- PyVarObject_HEAD_INIT(NULL, 0)
- "pydnspp.Opcode",
- sizeof(s_Opcode), // tp_basicsize
- 0, // tp_itemsize
- (destructor)Opcode_destroy, // tp_dealloc
- NULL, // tp_print
- NULL, // tp_getattr
- NULL, // tp_setattr
- NULL, // tp_reserved
- NULL, // tp_repr
- NULL, // tp_as_number
- NULL, // tp_as_sequence
- NULL, // tp_as_mapping
- NULL, // tp_hash
- NULL, // tp_call
- Opcode_str, // tp_str
- NULL, // tp_getattro
- NULL, // tp_setattro
- NULL, // tp_as_buffer
- Py_TPFLAGS_DEFAULT, // tp_flags
- "The Opcode class objects represent standard OPCODEs "
- "of the header section of DNS messages.",
- NULL, // tp_traverse
- NULL, // tp_clear
- (richcmpfunc)Opcode_richcmp, // tp_richcompare
- 0, // tp_weaklistoffset
- NULL, // tp_iter
- NULL, // tp_iternext
- Opcode_methods, // tp_methods
- NULL, // tp_members
- NULL, // tp_getset
- NULL, // tp_base
- NULL, // tp_dict
- NULL, // tp_descr_get
- NULL, // tp_descr_set
- 0, // tp_dictoffset
- (initproc)Opcode_init, // tp_init
- NULL, // tp_alloc
- PyType_GenericNew, // tp_new
- NULL, // tp_free
- NULL, // tp_is_gc
- NULL, // tp_bases
- NULL, // tp_mro
- NULL, // tp_cache
- NULL, // tp_subclasses
- NULL, // tp_weaklist
- NULL, // tp_del
- 0 // tp_version_tag
-};
-
int
Opcode_init(s_Opcode* const self, PyObject* args) {
uint8_t code = 0;
if (PyArg_ParseTuple(args, "b", &code)) {
try {
- self->opcode = new Opcode(code);
+ self->cppobj = new Opcode(code);
self->static_code = false;
} catch (const isc::OutOfRange& ex) {
PyErr_SetString(PyExc_OverflowError, ex.what());
@@ -181,22 +129,22 @@ Opcode_init(s_Opcode* const self, PyObject* args) {
void
Opcode_destroy(s_Opcode* const self) {
// Depending on whether we created the rcode or are referring
- // to a global static one, we do or do not delete self->opcode here
+ // to a global static one, we do or do not delete self->cppobj here
if (!self->static_code) {
- delete self->opcode;
+ delete self->cppobj;
}
- self->opcode = NULL;
+ self->cppobj = NULL;
Py_TYPE(self)->tp_free(self);
}
PyObject*
Opcode_getCode(const s_Opcode* const self) {
- return (Py_BuildValue("I", self->opcode->getCode()));
+ return (Py_BuildValue("I", self->cppobj->getCode()));
}
PyObject*
Opcode_toText(const s_Opcode* const self) {
- return (Py_BuildValue("s", self->opcode->toText().c_str()));
+ return (Py_BuildValue("s", self->cppobj->toText().c_str()));
}
PyObject*
@@ -211,7 +159,7 @@ PyObject*
Opcode_createStatic(const Opcode& opcode) {
s_Opcode* ret = PyObject_New(s_Opcode, &opcode_type);
if (ret != NULL) {
- ret->opcode = &opcode;
+ ret->cppobj = &opcode;
ret->static_code = true;
}
return (ret);
@@ -297,7 +245,7 @@ Opcode_RESERVED15(const s_Opcode*) {
return (Opcode_createStatic(Opcode::RESERVED15()));
}
-PyObject*
+PyObject*
Opcode_richcmp(const s_Opcode* const self, const s_Opcode* const other,
const int op)
{
@@ -318,10 +266,10 @@ Opcode_richcmp(const s_Opcode* const self, const s_Opcode* const other,
PyErr_SetString(PyExc_TypeError, "Unorderable type; Opcode");
return (NULL);
case Py_EQ:
- c = (*self->opcode == *other->opcode);
+ c = (*self->cppobj == *other->cppobj);
break;
case Py_NE:
- c = (*self->opcode != *other->opcode);
+ c = (*self->cppobj != *other->cppobj);
break;
case Py_GT:
PyErr_SetString(PyExc_TypeError, "Unorderable type; Opcode");
@@ -336,55 +284,88 @@ Opcode_richcmp(const s_Opcode* const self, const s_Opcode* const other,
Py_RETURN_FALSE;
}
-// Module Initialization, all statics are initialized here
+} // end of unnamed namespace
+
+namespace isc {
+namespace dns {
+namespace python {
+
+PyTypeObject opcode_type = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ "pydnspp.Opcode",
+ sizeof(s_Opcode), // tp_basicsize
+ 0, // tp_itemsize
+ (destructor)Opcode_destroy, // tp_dealloc
+ NULL, // tp_print
+ NULL, // tp_getattr
+ NULL, // tp_setattr
+ NULL, // tp_reserved
+ NULL, // tp_repr
+ NULL, // tp_as_number
+ NULL, // tp_as_sequence
+ NULL, // tp_as_mapping
+ NULL, // tp_hash
+ NULL, // tp_call
+ Opcode_str, // tp_str
+ NULL, // tp_getattro
+ NULL, // tp_setattro
+ NULL, // tp_as_buffer
+ Py_TPFLAGS_DEFAULT, // tp_flags
+ "The Opcode class objects represent standard OPCODEs "
+ "of the header section of DNS messages.",
+ NULL, // tp_traverse
+ NULL, // tp_clear
+ (richcmpfunc)Opcode_richcmp, // tp_richcompare
+ 0, // tp_weaklistoffset
+ NULL, // tp_iter
+ NULL, // tp_iternext
+ Opcode_methods, // tp_methods
+ NULL, // tp_members
+ NULL, // tp_getset
+ NULL, // tp_base
+ NULL, // tp_dict
+ NULL, // tp_descr_get
+ NULL, // tp_descr_set
+ 0, // tp_dictoffset
+ (initproc)Opcode_init, // tp_init
+ NULL, // tp_alloc
+ PyType_GenericNew, // tp_new
+ NULL, // tp_free
+ NULL, // tp_is_gc
+ NULL, // tp_bases
+ NULL, // tp_mro
+ NULL, // tp_cache
+ NULL, // tp_subclasses
+ NULL, // tp_weaklist
+ NULL, // tp_del
+ 0 // tp_version_tag
+};
+
+PyObject*
+createOpcodeObject(const Opcode& source) {
+ OpcodeContainer container(PyObject_New(s_Opcode, &opcode_type));
+ container.set(new Opcode(source));
+ return (container.release());
+}
+
bool
-initModulePart_Opcode(PyObject* mod) {
- // We initialize the static description object with PyType_Ready(),
- // then add it to the module. This is not just a check! (leaving
- // this out results in segmentation faults)
- if (PyType_Ready(&opcode_type) < 0) {
- return (false);
- }
- Py_INCREF(&opcode_type);
- void* p = &opcode_type;
- if (PyModule_AddObject(mod, "Opcode", static_cast<PyObject*>(p)) != 0) {
- Py_DECREF(&opcode_type);
- return (false);
+PyOpcode_Check(PyObject* obj) {
+ if (obj == NULL) {
+ isc_throw(PyCPPWrapperException, "obj argument NULL in typecheck");
}
+ return (PyObject_TypeCheck(obj, &opcode_type));
+}
- addClassVariable(opcode_type, "QUERY_CODE",
- Py_BuildValue("h", Opcode::QUERY_CODE));
- addClassVariable(opcode_type, "IQUERY_CODE",
- Py_BuildValue("h", Opcode::IQUERY_CODE));
- addClassVariable(opcode_type, "STATUS_CODE",
- Py_BuildValue("h", Opcode::STATUS_CODE));
- addClassVariable(opcode_type, "RESERVED3_CODE",
- Py_BuildValue("h", Opcode::RESERVED3_CODE));
- addClassVariable(opcode_type, "NOTIFY_CODE",
- Py_BuildValue("h", Opcode::NOTIFY_CODE));
- addClassVariable(opcode_type, "UPDATE_CODE",
- Py_BuildValue("h", Opcode::UPDATE_CODE));
- addClassVariable(opcode_type, "RESERVED6_CODE",
- Py_BuildValue("h", Opcode::RESERVED6_CODE));
- addClassVariable(opcode_type, "RESERVED7_CODE",
- Py_BuildValue("h", Opcode::RESERVED7_CODE));
- addClassVariable(opcode_type, "RESERVED8_CODE",
- Py_BuildValue("h", Opcode::RESERVED8_CODE));
- addClassVariable(opcode_type, "RESERVED9_CODE",
- Py_BuildValue("h", Opcode::RESERVED9_CODE));
- addClassVariable(opcode_type, "RESERVED10_CODE",
- Py_BuildValue("h", Opcode::RESERVED10_CODE));
- addClassVariable(opcode_type, "RESERVED11_CODE",
- Py_BuildValue("h", Opcode::RESERVED11_CODE));
- addClassVariable(opcode_type, "RESERVED12_CODE",
- Py_BuildValue("h", Opcode::RESERVED12_CODE));
- addClassVariable(opcode_type, "RESERVED13_CODE",
- Py_BuildValue("h", Opcode::RESERVED13_CODE));
- addClassVariable(opcode_type, "RESERVED14_CODE",
- Py_BuildValue("h", Opcode::RESERVED14_CODE));
- addClassVariable(opcode_type, "RESERVED15_CODE",
- Py_BuildValue("h", Opcode::RESERVED15_CODE));
-
- return (true);
+const Opcode&
+PyOpcode_ToOpcode(const PyObject* opcode_obj) {
+ if (opcode_obj == NULL) {
+ isc_throw(PyCPPWrapperException,
+ "obj argument NULL in Opcode PyObject conversion");
+ }
+ const s_Opcode* opcode = static_cast<const s_Opcode*>(opcode_obj);
+ return (*opcode->cppobj);
}
-} // end of unnamed namespace
+
+} // end python namespace
+} // end dns namespace
+} // end isc namespace
diff --git a/src/lib/dns/python/opcode_python.h b/src/lib/dns/python/opcode_python.h
new file mode 100644
index 0000000..d0aec15
--- /dev/null
+++ b/src/lib/dns/python/opcode_python.h
@@ -0,0 +1,64 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __PYTHON_OPCODE_H
+#define __PYTHON_OPCODE_H 1
+
+#include <Python.h>
+
+namespace isc {
+namespace dns {
+class Opcode;
+
+namespace python {
+
+extern PyTypeObject opcode_type;
+
+/// This is a simple shortcut to create a python Opcode object (in the
+/// form of a pointer to PyObject) with minimal exception safety.
+/// On success, it returns a valid pointer to PyObject with a reference
+/// counter of 1; if something goes wrong it throws an exception (it never
+/// returns a NULL pointer).
+/// This function is expected to be called within a try block
+/// followed by necessary setup for python exception.
+PyObject* createOpcodeObject(const Opcode& source);
+
+/// \brief Checks if the given python object is an Opcode object
+///
+/// \exception PyCPPWrapperException if obj is NULL
+///
+/// \param obj The object to check the type of
+/// \return true if the object is of type Opcode, false otherwise
+bool PyOpcode_Check(PyObject* obj);
+
+/// \brief Returns a reference to the Opcode object contained within the given
+/// Python object.
+///
+/// \note The given object MUST be of type Opcode; this can be checked with
+/// either the right call to ParseTuple("O!"), or with PyOpcode_Check()
+///
+/// \note This is not a copy; if the Opcode is needed when the PyObject
+/// may be destroyed, the caller must copy it itself.
+///
+/// \param opcode_obj The opcode object to convert
+const Opcode& PyOpcode_ToOpcode(const PyObject* opcode_obj);
+
+} // namespace python
+} // namespace dns
+} // namespace isc
+#endif // __PYTHON_OPCODE_H
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/dns/python/pydnspp.cc b/src/lib/dns/python/pydnspp.cc
index 07abf71..0a7d8e5 100644
--- a/src/lib/dns/python/pydnspp.cc
+++ b/src/lib/dns/python/pydnspp.cc
@@ -21,63 +21,707 @@
// name initModulePart_<name>, and return true/false instead of
// NULL/*mod
//
-// And of course care has to be taken that all identifiers be unique
+// The big init function is split up into a separate initModulePart function
+// for each class we add.
#define PY_SSIZE_T_CLEAN
#include <Python.h>
#include <structmember.h>
-#include <config.h>
-
-#include <exceptions/exceptions.h>
-
-#include <util/buffer.h>
-
-#include <dns/exceptions.h>
-#include <dns/name.h>
-#include <dns/messagerenderer.h>
+#include <dns/message.h>
+#include <dns/opcode.h>
+#include <dns/tsig.h>
+#include <util/python/pycppwrapper_util.h>
#include "pydnspp_common.h"
+
+#include "edns_python.h"
+#include "message_python.h"
#include "messagerenderer_python.h"
#include "name_python.h"
+#include "opcode_python.h"
+#include "pydnspp_common.h"
+#include "pydnspp_towire.h"
+#include "question_python.h"
#include "rcode_python.h"
+#include "rdata_python.h"
+#include "rrclass_python.h"
+#include "rrset_python.h"
+#include "rrttl_python.h"
+#include "rrtype_python.h"
+#include "tsigerror_python.h"
#include "tsigkey_python.h"
+#include "tsig_python.h"
#include "tsig_rdata_python.h"
-#include "tsigerror_python.h"
#include "tsigrecord_python.h"
-#include "tsig_python.h"
-namespace isc {
-namespace dns {
-namespace python {
-// For our 'general' isc::Exceptions
-PyObject* po_IscException;
-PyObject* po_InvalidParameter;
+using namespace isc::dns;
+using namespace isc::dns::python;
+using namespace isc::util::python;
+
+namespace {
+
+bool
+initModulePart_EDNS(PyObject* mod) {
+ // We initialize the static description object with PyType_Ready(),
+ // then add it to the module. This is not just a check! (leaving
+ // this out results in segmentation faults)
+ //
+ // After the type has been initialized, we initialize any exceptions
+ // that are defined in the wrapper for this class, and add constants
+ // to the type, if any
+
+ if (PyType_Ready(&edns_type) < 0) {
+ return (false);
+ }
+ Py_INCREF(&edns_type);
+ void* p = &edns_type;
+ PyModule_AddObject(mod, "EDNS", static_cast<PyObject*>(p));
+
+ addClassVariable(edns_type, "SUPPORTED_VERSION",
+ Py_BuildValue("B", EDNS::SUPPORTED_VERSION));
-// For our own isc::dns::Exception
-PyObject* po_DNSMessageBADVERS;
+ return (true);
}
+
+bool
+initModulePart_Message(PyObject* mod) {
+ if (PyType_Ready(&message_type) < 0) {
+ return (false);
+ }
+ void* p = &message_type;
+ if (PyModule_AddObject(mod, "Message", static_cast<PyObject*>(p)) < 0) {
+ return (false);
+ }
+ Py_INCREF(&message_type);
+
+ try {
+ //
+ // Constant class variables
+ //
+
+ // Parse mode
+ installClassVariable(message_type, "PARSE",
+ Py_BuildValue("I", Message::PARSE));
+ installClassVariable(message_type, "RENDER",
+ Py_BuildValue("I", Message::RENDER));
+
+ // Parse options
+ installClassVariable(message_type, "PARSE_DEFAULT",
+ Py_BuildValue("I", Message::PARSE_DEFAULT));
+ installClassVariable(message_type, "PRESERVE_ORDER",
+ Py_BuildValue("I", Message::PRESERVE_ORDER));
+
+ // Header flags
+ installClassVariable(message_type, "HEADERFLAG_QR",
+ Py_BuildValue("I", Message::HEADERFLAG_QR));
+ installClassVariable(message_type, "HEADERFLAG_AA",
+ Py_BuildValue("I", Message::HEADERFLAG_AA));
+ installClassVariable(message_type, "HEADERFLAG_TC",
+ Py_BuildValue("I", Message::HEADERFLAG_TC));
+ installClassVariable(message_type, "HEADERFLAG_RD",
+ Py_BuildValue("I", Message::HEADERFLAG_RD));
+ installClassVariable(message_type, "HEADERFLAG_RA",
+ Py_BuildValue("I", Message::HEADERFLAG_RA));
+ installClassVariable(message_type, "HEADERFLAG_AD",
+ Py_BuildValue("I", Message::HEADERFLAG_AD));
+ installClassVariable(message_type, "HEADERFLAG_CD",
+ Py_BuildValue("I", Message::HEADERFLAG_CD));
+
+ // Sections
+ installClassVariable(message_type, "SECTION_QUESTION",
+ Py_BuildValue("I", Message::SECTION_QUESTION));
+ installClassVariable(message_type, "SECTION_ANSWER",
+ Py_BuildValue("I", Message::SECTION_ANSWER));
+ installClassVariable(message_type, "SECTION_AUTHORITY",
+ Py_BuildValue("I", Message::SECTION_AUTHORITY));
+ installClassVariable(message_type, "SECTION_ADDITIONAL",
+ Py_BuildValue("I", Message::SECTION_ADDITIONAL));
+
+ // Protocol constant
+ installClassVariable(message_type, "DEFAULT_MAX_UDPSIZE",
+ Py_BuildValue("I", Message::DEFAULT_MAX_UDPSIZE));
+
+ /* Class-specific exceptions */
+ po_MessageTooShort =
+ PyErr_NewException("pydnspp.MessageTooShort", NULL, NULL);
+ PyObjectContainer(po_MessageTooShort).installToModule(
+ mod, "MessageTooShort");
+ po_InvalidMessageSection =
+ PyErr_NewException("pydnspp.InvalidMessageSection", NULL, NULL);
+ PyObjectContainer(po_InvalidMessageSection).installToModule(
+ mod, "InvalidMessageSection");
+ po_InvalidMessageOperation =
+ PyErr_NewException("pydnspp.InvalidMessageOperation", NULL, NULL);
+ PyObjectContainer(po_InvalidMessageOperation).installToModule(
+ mod, "InvalidMessageOperation");
+ po_InvalidMessageUDPSize =
+ PyErr_NewException("pydnspp.InvalidMessageUDPSize", NULL, NULL);
+ PyObjectContainer(po_InvalidMessageUDPSize).installToModule(
+ mod, "InvalidMessageUDPSize");
+ po_DNSMessageBADVERS =
+ PyErr_NewException("pydnspp.DNSMessageBADVERS", NULL, NULL);
+ PyObjectContainer(po_DNSMessageBADVERS).installToModule(
+ mod, "DNSMessageBADVERS");
+ } catch (const std::exception& ex) {
+ const std::string ex_what =
+ "Unexpected failure in Message initialization: " +
+ std::string(ex.what());
+ PyErr_SetString(po_IscException, ex_what.c_str());
+ return (false);
+ } catch (...) {
+ PyErr_SetString(PyExc_SystemError,
+ "Unexpected failure in Message initialization");
+ return (false);
+ }
+
+ return (true);
}
+
+bool
+initModulePart_MessageRenderer(PyObject* mod) {
+ if (PyType_Ready(&messagerenderer_type) < 0) {
+ return (false);
+ }
+ Py_INCREF(&messagerenderer_type);
+
+ addClassVariable(messagerenderer_type, "CASE_INSENSITIVE",
+ Py_BuildValue("I", MessageRenderer::CASE_INSENSITIVE));
+ addClassVariable(messagerenderer_type, "CASE_SENSITIVE",
+ Py_BuildValue("I", MessageRenderer::CASE_SENSITIVE));
+
+ PyModule_AddObject(mod, "MessageRenderer",
+ reinterpret_cast<PyObject*>(&messagerenderer_type));
+
+ return (true);
}
-// order is important here!
-using namespace isc::dns::python;
+bool
+initModulePart_Name(PyObject* mod) {
-#include <dns/python/rrclass_python.cc> // needs Messagerenderer
-#include <dns/python/rrtype_python.cc> // needs Messagerenderer
-#include <dns/python/rrttl_python.cc> // needs Messagerenderer
-#include <dns/python/rdata_python.cc> // needs Type, Class
-#include <dns/python/rrset_python.cc> // needs Rdata, RRTTL
-#include <dns/python/question_python.cc> // needs RRClass, RRType, RRTTL,
- // Name
-#include <dns/python/opcode_python.cc>
-#include <dns/python/edns_python.cc> // needs Messagerenderer, Rcode
-#include <dns/python/message_python.cc> // needs RRset, Question
+ //
+ // NameComparisonResult
+ //
+ if (PyType_Ready(&name_comparison_result_type) < 0) {
+ return (false);
+ }
+ Py_INCREF(&name_comparison_result_type);
+
+ // Add the enums to the module
+ po_NameRelation = Py_BuildValue("{i:s,i:s,i:s,i:s}",
+ NameComparisonResult::SUPERDOMAIN, "SUPERDOMAIN",
+ NameComparisonResult::SUBDOMAIN, "SUBDOMAIN",
+ NameComparisonResult::EQUAL, "EQUAL",
+ NameComparisonResult::COMMONANCESTOR, "COMMONANCESTOR");
+ addClassVariable(name_comparison_result_type, "NameRelation",
+ po_NameRelation);
+
+ PyModule_AddObject(mod, "NameComparisonResult",
+ reinterpret_cast<PyObject*>(&name_comparison_result_type));
+
+ //
+ // Name
+ //
+
+ if (PyType_Ready(&name_type) < 0) {
+ return (false);
+ }
+ Py_INCREF(&name_type);
+
+ // Add the constants to the module
+ addClassVariable(name_type, "MAX_WIRE",
+ Py_BuildValue("I", Name::MAX_WIRE));
+ addClassVariable(name_type, "MAX_LABELS",
+ Py_BuildValue("I", Name::MAX_LABELS));
+ addClassVariable(name_type, "MAX_LABELLEN",
+ Py_BuildValue("I", Name::MAX_LABELLEN));
+ addClassVariable(name_type, "MAX_COMPRESS_POINTER",
+ Py_BuildValue("I", Name::MAX_COMPRESS_POINTER));
+ addClassVariable(name_type, "COMPRESS_POINTER_MARK8",
+ Py_BuildValue("I", Name::COMPRESS_POINTER_MARK8));
+ addClassVariable(name_type, "COMPRESS_POINTER_MARK16",
+ Py_BuildValue("I", Name::COMPRESS_POINTER_MARK16));
+
+ addClassVariable(name_type, "ROOT_NAME",
+ createNameObject(Name::ROOT_NAME()));
+
+ PyModule_AddObject(mod, "Name",
+ reinterpret_cast<PyObject*>(&name_type));
+
+
+ // Add the exceptions to the module
+ po_EmptyLabel = PyErr_NewException("pydnspp.EmptyLabel", NULL, NULL);
+ PyModule_AddObject(mod, "EmptyLabel", po_EmptyLabel);
+
+ po_TooLongName = PyErr_NewException("pydnspp.TooLongName", NULL, NULL);
+ PyModule_AddObject(mod, "TooLongName", po_TooLongName);
+
+ po_TooLongLabel = PyErr_NewException("pydnspp.TooLongLabel", NULL, NULL);
+ PyModule_AddObject(mod, "TooLongLabel", po_TooLongLabel);
+
+ po_BadLabelType = PyErr_NewException("pydnspp.BadLabelType", NULL, NULL);
+ PyModule_AddObject(mod, "BadLabelType", po_BadLabelType);
+
+ po_BadEscape = PyErr_NewException("pydnspp.BadEscape", NULL, NULL);
+ PyModule_AddObject(mod, "BadEscape", po_BadEscape);
+
+ po_IncompleteName = PyErr_NewException("pydnspp.IncompleteName", NULL, NULL);
+ PyModule_AddObject(mod, "IncompleteName", po_IncompleteName);
+
+ po_InvalidBufferPosition =
+ PyErr_NewException("pydnspp.InvalidBufferPosition", NULL, NULL);
+ PyModule_AddObject(mod, "InvalidBufferPosition", po_InvalidBufferPosition);
+
+ // This one could have gone into the message_python.cc file, but is
+ // already needed here.
+ po_DNSMessageFORMERR = PyErr_NewException("pydnspp.DNSMessageFORMERR",
+ NULL, NULL);
+ PyModule_AddObject(mod, "DNSMessageFORMERR", po_DNSMessageFORMERR);
+
+ return (true);
+}
+
+bool
+initModulePart_Opcode(PyObject* mod) {
+ if (PyType_Ready(&opcode_type) < 0) {
+ return (false);
+ }
+ Py_INCREF(&opcode_type);
+ void* p = &opcode_type;
+ if (PyModule_AddObject(mod, "Opcode", static_cast<PyObject*>(p)) != 0) {
+ Py_DECREF(&opcode_type);
+ return (false);
+ }
+
+ addClassVariable(opcode_type, "QUERY_CODE",
+ Py_BuildValue("h", Opcode::QUERY_CODE));
+ addClassVariable(opcode_type, "IQUERY_CODE",
+ Py_BuildValue("h", Opcode::IQUERY_CODE));
+ addClassVariable(opcode_type, "STATUS_CODE",
+ Py_BuildValue("h", Opcode::STATUS_CODE));
+ addClassVariable(opcode_type, "RESERVED3_CODE",
+ Py_BuildValue("h", Opcode::RESERVED3_CODE));
+ addClassVariable(opcode_type, "NOTIFY_CODE",
+ Py_BuildValue("h", Opcode::NOTIFY_CODE));
+ addClassVariable(opcode_type, "UPDATE_CODE",
+ Py_BuildValue("h", Opcode::UPDATE_CODE));
+ addClassVariable(opcode_type, "RESERVED6_CODE",
+ Py_BuildValue("h", Opcode::RESERVED6_CODE));
+ addClassVariable(opcode_type, "RESERVED7_CODE",
+ Py_BuildValue("h", Opcode::RESERVED7_CODE));
+ addClassVariable(opcode_type, "RESERVED8_CODE",
+ Py_BuildValue("h", Opcode::RESERVED8_CODE));
+ addClassVariable(opcode_type, "RESERVED9_CODE",
+ Py_BuildValue("h", Opcode::RESERVED9_CODE));
+ addClassVariable(opcode_type, "RESERVED10_CODE",
+ Py_BuildValue("h", Opcode::RESERVED10_CODE));
+ addClassVariable(opcode_type, "RESERVED11_CODE",
+ Py_BuildValue("h", Opcode::RESERVED11_CODE));
+ addClassVariable(opcode_type, "RESERVED12_CODE",
+ Py_BuildValue("h", Opcode::RESERVED12_CODE));
+ addClassVariable(opcode_type, "RESERVED13_CODE",
+ Py_BuildValue("h", Opcode::RESERVED13_CODE));
+ addClassVariable(opcode_type, "RESERVED14_CODE",
+ Py_BuildValue("h", Opcode::RESERVED14_CODE));
+ addClassVariable(opcode_type, "RESERVED15_CODE",
+ Py_BuildValue("h", Opcode::RESERVED15_CODE));
+
+ return (true);
+}
+
+bool
+initModulePart_Question(PyObject* mod) {
+ if (PyType_Ready(&question_type) < 0) {
+ return (false);
+ }
+ Py_INCREF(&question_type);
+ PyModule_AddObject(mod, "Question",
+ reinterpret_cast<PyObject*>(&question_type));
+
+ return (true);
+}
+
+bool
+initModulePart_Rcode(PyObject* mod) {
+ if (PyType_Ready(&rcode_type) < 0) {
+ return (false);
+ }
+ Py_INCREF(&rcode_type);
+ void* p = &rcode_type;
+ if (PyModule_AddObject(mod, "Rcode", static_cast<PyObject*>(p)) != 0) {
+ Py_DECREF(&rcode_type);
+ return (false);
+ }
+
+ addClassVariable(rcode_type, "NOERROR_CODE",
+ Py_BuildValue("h", Rcode::NOERROR_CODE));
+ addClassVariable(rcode_type, "FORMERR_CODE",
+ Py_BuildValue("h", Rcode::FORMERR_CODE));
+ addClassVariable(rcode_type, "SERVFAIL_CODE",
+ Py_BuildValue("h", Rcode::SERVFAIL_CODE));
+ addClassVariable(rcode_type, "NXDOMAIN_CODE",
+ Py_BuildValue("h", Rcode::NXDOMAIN_CODE));
+ addClassVariable(rcode_type, "NOTIMP_CODE",
+ Py_BuildValue("h", Rcode::NOTIMP_CODE));
+ addClassVariable(rcode_type, "REFUSED_CODE",
+ Py_BuildValue("h", Rcode::REFUSED_CODE));
+ addClassVariable(rcode_type, "YXDOMAIN_CODE",
+ Py_BuildValue("h", Rcode::YXDOMAIN_CODE));
+ addClassVariable(rcode_type, "YXRRSET_CODE",
+ Py_BuildValue("h", Rcode::YXRRSET_CODE));
+ addClassVariable(rcode_type, "NXRRSET_CODE",
+ Py_BuildValue("h", Rcode::NXRRSET_CODE));
+ addClassVariable(rcode_type, "NOTAUTH_CODE",
+ Py_BuildValue("h", Rcode::NOTAUTH_CODE));
+ addClassVariable(rcode_type, "NOTZONE_CODE",
+ Py_BuildValue("h", Rcode::NOTZONE_CODE));
+ addClassVariable(rcode_type, "RESERVED11_CODE",
+ Py_BuildValue("h", Rcode::RESERVED11_CODE));
+ addClassVariable(rcode_type, "RESERVED12_CODE",
+ Py_BuildValue("h", Rcode::RESERVED12_CODE));
+ addClassVariable(rcode_type, "RESERVED13_CODE",
+ Py_BuildValue("h", Rcode::RESERVED13_CODE));
+ addClassVariable(rcode_type, "RESERVED14_CODE",
+ Py_BuildValue("h", Rcode::RESERVED14_CODE));
+ addClassVariable(rcode_type, "RESERVED15_CODE",
+ Py_BuildValue("h", Rcode::RESERVED15_CODE));
+ addClassVariable(rcode_type, "BADVERS_CODE",
+ Py_BuildValue("h", Rcode::BADVERS_CODE));
+
+ return (true);
+}
+
+bool
+initModulePart_Rdata(PyObject* mod) {
+ if (PyType_Ready(&rdata_type) < 0) {
+ return (false);
+ }
+ Py_INCREF(&rdata_type);
+ PyModule_AddObject(mod, "Rdata",
+ reinterpret_cast<PyObject*>(&rdata_type));
+
+ // Add the exceptions to the class
+ po_InvalidRdataLength = PyErr_NewException("pydnspp.InvalidRdataLength",
+ NULL, NULL);
+ PyModule_AddObject(mod, "InvalidRdataLength", po_InvalidRdataLength);
+
+ po_InvalidRdataText = PyErr_NewException("pydnspp.InvalidRdataText",
+ NULL, NULL);
+ PyModule_AddObject(mod, "InvalidRdataText", po_InvalidRdataText);
+
+ po_CharStringTooLong = PyErr_NewException("pydnspp.CharStringTooLong",
+ NULL, NULL);
+ PyModule_AddObject(mod, "CharStringTooLong", po_CharStringTooLong);
+
+
+ return (true);
+}
+
+bool
+initModulePart_RRClass(PyObject* mod) {
+ po_InvalidRRClass = PyErr_NewException("pydnspp.InvalidRRClass",
+ NULL, NULL);
+ Py_INCREF(po_InvalidRRClass);
+ PyModule_AddObject(mod, "InvalidRRClass", po_InvalidRRClass);
+ po_IncompleteRRClass = PyErr_NewException("pydnspp.IncompleteRRClass",
+ NULL, NULL);
+ Py_INCREF(po_IncompleteRRClass);
+ PyModule_AddObject(mod, "IncompleteRRClass", po_IncompleteRRClass);
+
+ if (PyType_Ready(&rrclass_type) < 0) {
+ return (false);
+ }
+ Py_INCREF(&rrclass_type);
+ PyModule_AddObject(mod, "RRClass",
+ reinterpret_cast<PyObject*>(&rrclass_type));
+
+ return (true);
+}
+
+bool
+initModulePart_RRset(PyObject* mod) {
+ po_EmptyRRset = PyErr_NewException("pydnspp.EmptyRRset", NULL, NULL);
+ PyModule_AddObject(mod, "EmptyRRset", po_EmptyRRset);
+
+ // NameComparisonResult
+ if (PyType_Ready(&rrset_type) < 0) {
+ return (false);
+ }
+ Py_INCREF(&rrset_type);
+ PyModule_AddObject(mod, "RRset",
+ reinterpret_cast<PyObject*>(&rrset_type));
+
+ return (true);
+}
+
+bool
+initModulePart_RRTTL(PyObject* mod) {
+ po_InvalidRRTTL = PyErr_NewException("pydnspp.InvalidRRTTL", NULL, NULL);
+ PyModule_AddObject(mod, "InvalidRRTTL", po_InvalidRRTTL);
+ po_IncompleteRRTTL = PyErr_NewException("pydnspp.IncompleteRRTTL",
+ NULL, NULL);
+ PyModule_AddObject(mod, "IncompleteRRTTL", po_IncompleteRRTTL);
+
+ if (PyType_Ready(&rrttl_type) < 0) {
+ return (false);
+ }
+ Py_INCREF(&rrttl_type);
+ PyModule_AddObject(mod, "RRTTL",
+ reinterpret_cast<PyObject*>(&rrttl_type));
+
+ return (true);
+}
+
+bool
+initModulePart_RRType(PyObject* mod) {
+ // Add the exceptions to the module
+ po_InvalidRRType = PyErr_NewException("pydnspp.InvalidRRType", NULL, NULL);
+ PyModule_AddObject(mod, "InvalidRRType", po_InvalidRRType);
+ po_IncompleteRRType = PyErr_NewException("pydnspp.IncompleteRRType",
+ NULL, NULL);
+ PyModule_AddObject(mod, "IncompleteRRType", po_IncompleteRRType);
+
+ if (PyType_Ready(&rrtype_type) < 0) {
+ return (false);
+ }
+ Py_INCREF(&rrtype_type);
+ PyModule_AddObject(mod, "RRType",
+ reinterpret_cast<PyObject*>(&rrtype_type));
+
+ return (true);
+}
+
+bool
+initModulePart_TSIGError(PyObject* mod) {
+ if (PyType_Ready(&tsigerror_type) < 0) {
+ return (false);
+ }
+ void* p = &tsigerror_type;
+ if (PyModule_AddObject(mod, "TSIGError", static_cast<PyObject*>(p)) < 0) {
+ return (false);
+ }
+ Py_INCREF(&tsigerror_type);
+
+ try {
+ // Constant class variables
+ // Error codes (bare values)
+ installClassVariable(tsigerror_type, "BAD_SIG_CODE",
+ Py_BuildValue("H", TSIGError::BAD_SIG_CODE));
+ installClassVariable(tsigerror_type, "BAD_KEY_CODE",
+ Py_BuildValue("H", TSIGError::BAD_KEY_CODE));
+ installClassVariable(tsigerror_type, "BAD_TIME_CODE",
+ Py_BuildValue("H", TSIGError::BAD_TIME_CODE));
+
+ // Error codes (constant objects)
+ installClassVariable(tsigerror_type, "NOERROR",
+ createTSIGErrorObject(TSIGError::NOERROR()));
+ installClassVariable(tsigerror_type, "FORMERR",
+ createTSIGErrorObject(TSIGError::FORMERR()));
+ installClassVariable(tsigerror_type, "SERVFAIL",
+ createTSIGErrorObject(TSIGError::SERVFAIL()));
+ installClassVariable(tsigerror_type, "NXDOMAIN",
+ createTSIGErrorObject(TSIGError::NXDOMAIN()));
+ installClassVariable(tsigerror_type, "NOTIMP",
+ createTSIGErrorObject(TSIGError::NOTIMP()));
+ installClassVariable(tsigerror_type, "REFUSED",
+ createTSIGErrorObject(TSIGError::REFUSED()));
+ installClassVariable(tsigerror_type, "YXDOMAIN",
+ createTSIGErrorObject(TSIGError::YXDOMAIN()));
+ installClassVariable(tsigerror_type, "YXRRSET",
+ createTSIGErrorObject(TSIGError::YXRRSET()));
+ installClassVariable(tsigerror_type, "NXRRSET",
+ createTSIGErrorObject(TSIGError::NXRRSET()));
+ installClassVariable(tsigerror_type, "NOTAUTH",
+ createTSIGErrorObject(TSIGError::NOTAUTH()));
+ installClassVariable(tsigerror_type, "NOTZONE",
+ createTSIGErrorObject(TSIGError::NOTZONE()));
+ installClassVariable(tsigerror_type, "RESERVED11",
+ createTSIGErrorObject(TSIGError::RESERVED11()));
+ installClassVariable(tsigerror_type, "RESERVED12",
+ createTSIGErrorObject(TSIGError::RESERVED12()));
+ installClassVariable(tsigerror_type, "RESERVED13",
+ createTSIGErrorObject(TSIGError::RESERVED13()));
+ installClassVariable(tsigerror_type, "RESERVED14",
+ createTSIGErrorObject(TSIGError::RESERVED14()));
+ installClassVariable(tsigerror_type, "RESERVED15",
+ createTSIGErrorObject(TSIGError::RESERVED15()));
+ installClassVariable(tsigerror_type, "BAD_SIG",
+ createTSIGErrorObject(TSIGError::BAD_SIG()));
+ installClassVariable(tsigerror_type, "BAD_KEY",
+ createTSIGErrorObject(TSIGError::BAD_KEY()));
+ installClassVariable(tsigerror_type, "BAD_TIME",
+ createTSIGErrorObject(TSIGError::BAD_TIME()));
+ } catch (const std::exception& ex) {
+ const std::string ex_what =
+ "Unexpected failure in TSIGError initialization: " +
+ std::string(ex.what());
+ PyErr_SetString(po_IscException, ex_what.c_str());
+ return (false);
+ } catch (...) {
+ PyErr_SetString(PyExc_SystemError,
+ "Unexpected failure in TSIGError initialization");
+ return (false);
+ }
+
+ return (true);
+}
+
+bool
+initModulePart_TSIGKey(PyObject* mod) {
+ if (PyType_Ready(&tsigkey_type) < 0) {
+ return (false);
+ }
+ void* p = &tsigkey_type;
+ if (PyModule_AddObject(mod, "TSIGKey", static_cast<PyObject*>(p)) != 0) {
+ return (false);
+ }
+ Py_INCREF(&tsigkey_type);
+
+ try {
+ // Constant class variables
+ installClassVariable(tsigkey_type, "HMACMD5_NAME",
+ createNameObject(TSIGKey::HMACMD5_NAME()));
+ installClassVariable(tsigkey_type, "HMACSHA1_NAME",
+ createNameObject(TSIGKey::HMACSHA1_NAME()));
+ installClassVariable(tsigkey_type, "HMACSHA256_NAME",
+ createNameObject(TSIGKey::HMACSHA256_NAME()));
+ installClassVariable(tsigkey_type, "HMACSHA224_NAME",
+ createNameObject(TSIGKey::HMACSHA224_NAME()));
+ installClassVariable(tsigkey_type, "HMACSHA384_NAME",
+ createNameObject(TSIGKey::HMACSHA384_NAME()));
+ installClassVariable(tsigkey_type, "HMACSHA512_NAME",
+ createNameObject(TSIGKey::HMACSHA512_NAME()));
+ } catch (const std::exception& ex) {
+ const std::string ex_what =
+ "Unexpected failure in TSIGKey initialization: " +
+ std::string(ex.what());
+ PyErr_SetString(po_IscException, ex_what.c_str());
+ return (false);
+ } catch (...) {
+ PyErr_SetString(PyExc_SystemError,
+ "Unexpected failure in TSIGKey initialization");
+ return (false);
+ }
+
+ return (true);
+}
+
+bool
+initModulePart_TSIGKeyRing(PyObject* mod) {
+ if (PyType_Ready(&tsigkeyring_type) < 0) {
+ return (false);
+ }
+ Py_INCREF(&tsigkeyring_type);
+ void* p = &tsigkeyring_type;
+ if (PyModule_AddObject(mod, "TSIGKeyRing",
+ static_cast<PyObject*>(p)) != 0) {
+ Py_DECREF(&tsigkeyring_type);
+ return (false);
+ }
+
+ addClassVariable(tsigkeyring_type, "SUCCESS",
+ Py_BuildValue("I", TSIGKeyRing::SUCCESS));
+ addClassVariable(tsigkeyring_type, "EXIST",
+ Py_BuildValue("I", TSIGKeyRing::EXIST));
+ addClassVariable(tsigkeyring_type, "NOTFOUND",
+ Py_BuildValue("I", TSIGKeyRing::NOTFOUND));
+
+ return (true);
+}
+
+bool
+initModulePart_TSIGContext(PyObject* mod) {
+ if (PyType_Ready(&tsigcontext_type) < 0) {
+ return (false);
+ }
+ void* p = &tsigcontext_type;
+ if (PyModule_AddObject(mod, "TSIGContext",
+ static_cast<PyObject*>(p)) < 0) {
+ return (false);
+ }
+ Py_INCREF(&tsigcontext_type);
+
+ try {
+ // Class specific exceptions
+ po_TSIGContextError = PyErr_NewException("pydnspp.TSIGContextError",
+ po_IscException, NULL);
+ PyObjectContainer(po_TSIGContextError).installToModule(
+ mod, "TSIGContextError");
+
+ // Constant class variables
+ installClassVariable(tsigcontext_type, "STATE_INIT",
+ Py_BuildValue("I", TSIGContext::INIT));
+ installClassVariable(tsigcontext_type, "STATE_SENT_REQUEST",
+ Py_BuildValue("I", TSIGContext::SENT_REQUEST));
+ installClassVariable(tsigcontext_type, "STATE_RECEIVED_REQUEST",
+ Py_BuildValue("I", TSIGContext::RECEIVED_REQUEST));
+ installClassVariable(tsigcontext_type, "STATE_SENT_RESPONSE",
+ Py_BuildValue("I", TSIGContext::SENT_RESPONSE));
+ installClassVariable(tsigcontext_type, "STATE_VERIFIED_RESPONSE",
+ Py_BuildValue("I",
+ TSIGContext::VERIFIED_RESPONSE));
+
+ installClassVariable(tsigcontext_type, "DEFAULT_FUDGE",
+ Py_BuildValue("H", TSIGContext::DEFAULT_FUDGE));
+ } catch (const std::exception& ex) {
+ const std::string ex_what =
+ "Unexpected failure in TSIGContext initialization: " +
+ std::string(ex.what());
+ PyErr_SetString(po_IscException, ex_what.c_str());
+ return (false);
+ } catch (...) {
+ PyErr_SetString(PyExc_SystemError,
+ "Unexpected failure in TSIGContext initialization");
+ return (false);
+ }
+
+ return (true);
+}
+
+bool
+initModulePart_TSIG(PyObject* mod) {
+ if (PyType_Ready(&tsig_type) < 0) {
+ return (false);
+ }
+ void* p = &tsig_type;
+ if (PyModule_AddObject(mod, "TSIG", static_cast<PyObject*>(p)) < 0) {
+ return (false);
+ }
+ Py_INCREF(&tsig_type);
+
+ return (true);
+}
+
+bool
+initModulePart_TSIGRecord(PyObject* mod) {
+ if (PyType_Ready(&tsigrecord_type) < 0) {
+ return (false);
+ }
+ void* p = &tsigrecord_type;
+ if (PyModule_AddObject(mod, "TSIGRecord", static_cast<PyObject*>(p)) < 0) {
+ return (false);
+ }
+ Py_INCREF(&tsigrecord_type);
+
+ try {
+ // Constant class variables
+ installClassVariable(tsigrecord_type, "TSIG_TTL",
+ Py_BuildValue("I", 0));
+ } catch (const std::exception& ex) {
+ const std::string ex_what =
+ "Unexpected failure in TSIGRecord initialization: " +
+ std::string(ex.what());
+ PyErr_SetString(po_IscException, ex_what.c_str());
+ return (false);
+ } catch (...) {
+ PyErr_SetString(PyExc_SystemError,
+ "Unexpected failure in TSIGRecord initialization");
+ return (false);
+ }
+
+ return (true);
+}
-//
-// Definition of the module
-//
-namespace {
PyModuleDef pydnspp = {
{ PyObject_HEAD_INIT(NULL) NULL, 0, NULL},
"pydnspp",
diff --git a/src/lib/dns/python/pydnspp_common.cc b/src/lib/dns/python/pydnspp_common.cc
index 8ca763a..0f0f873 100644
--- a/src/lib/dns/python/pydnspp_common.cc
+++ b/src/lib/dns/python/pydnspp_common.cc
@@ -15,9 +15,45 @@
#include <Python.h>
#include <pydnspp_common.h>
+#include <exceptions/exceptions.h>
+
+#include <util/buffer.h>
+
+#include <dns/exceptions.h>
+#include <dns/name.h>
+#include <dns/messagerenderer.h>
+
+#include "pydnspp_common.h"
+#include "messagerenderer_python.h"
+#include "name_python.h"
+#include "rdata_python.h"
+#include "rrclass_python.h"
+#include "rrtype_python.h"
+#include "rrttl_python.h"
+#include "rrset_python.h"
+#include "rcode_python.h"
+#include "opcode_python.h"
+#include "tsigkey_python.h"
+#include "tsig_rdata_python.h"
+#include "tsigerror_python.h"
+#include "tsigrecord_python.h"
+#include "tsig_python.h"
+#include "question_python.h"
+#include "message_python.h"
+
+using namespace isc::dns::python;
+
namespace isc {
namespace dns {
namespace python {
+// For our 'general' isc::Exceptions
+PyObject* po_IscException;
+PyObject* po_InvalidParameter;
+
+// For our own isc::dns::Exception
+PyObject* po_DNSMessageBADVERS;
+
+
int
readDataFromSequence(uint8_t *data, size_t len, PyObject* sequence) {
PyObject* el = NULL;
diff --git a/src/lib/dns/python/pydnspp_common.h b/src/lib/dns/python/pydnspp_common.h
index ed90998..8092b08 100644
--- a/src/lib/dns/python/pydnspp_common.h
+++ b/src/lib/dns/python/pydnspp_common.h
@@ -20,8 +20,6 @@
#include <stdexcept>
#include <string>
-#include <util/python/pycppwrapper_util.h>
-
namespace isc {
namespace dns {
namespace python {
diff --git a/src/lib/dns/python/pydnspp_towire.h b/src/lib/dns/python/pydnspp_towire.h
index 66362a0..e987a29 100644
--- a/src/lib/dns/python/pydnspp_towire.h
+++ b/src/lib/dns/python/pydnspp_towire.h
@@ -93,10 +93,10 @@ toWireWrapper(const PYSTRUCT* const self, PyObject* args) {
}
// To MessageRenderer version
- s_MessageRenderer* renderer;
+ PyObject* renderer;
if (PyArg_ParseTuple(args, "O!", &messagerenderer_type, &renderer)) {
const unsigned int n = TOWIRECALLER(*self->cppobj)(
- *renderer->messagerenderer);
+ PyMessageRenderer_ToMessageRenderer(renderer));
return (Py_BuildValue("I", n));
}
diff --git a/src/lib/dns/python/question_python.cc b/src/lib/dns/python/question_python.cc
index c702f85..44d68a2 100644
--- a/src/lib/dns/python/question_python.cc
+++ b/src/lib/dns/python/question_python.cc
@@ -12,25 +12,34 @@
// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
// PERFORMANCE OF THIS SOFTWARE.
+#define PY_SSIZE_T_CLEAN
+#include <Python.h>
#include <dns/question.h>
+#include <dns/messagerenderer.h>
+#include <dns/exceptions.h>
+#include <util/buffer.h>
+#include <util/python/pycppwrapper_util.h>
+
+#include "pydnspp_common.h"
+#include "question_python.h"
+#include "name_python.h"
+#include "rrclass_python.h"
+#include "rrtype_python.h"
+#include "messagerenderer_python.h"
+
+using namespace std;
using namespace isc::dns;
+using namespace isc::dns::python;
+using namespace isc::util;
+using namespace isc::util::python;
+using namespace isc;
-//
-// Question
-//
-
-// The s_* Class simply coverst one instantiation of the object
+namespace {
class s_Question : public PyObject {
public:
- QuestionPtr question;
+ isc::dns::QuestionPtr cppobj;
};
-//
-// We declare the functions here, the definitions are below
-// the type definition of the object, since both can use the other
-//
-
-// General creation and destruction
static int Question_init(s_Question* self, PyObject* args);
static void Question_destroy(s_Question* self);
@@ -69,60 +78,6 @@ static PyMethodDef Question_methods[] = {
{ NULL, NULL, 0, NULL }
};
-// This defines the complete type for reflection in python and
-// parsing of PyObject* to s_Question
-// Most of the functions are not actually implemented and NULL here.
-static PyTypeObject question_type = {
- PyVarObject_HEAD_INIT(NULL, 0)
- "pydnspp.Question",
- sizeof(s_Question), // tp_basicsize
- 0, // tp_itemsize
- (destructor)Question_destroy, // tp_dealloc
- NULL, // tp_print
- NULL, // tp_getattr
- NULL, // tp_setattr
- NULL, // tp_reserved
- NULL, // tp_repr
- NULL, // tp_as_number
- NULL, // tp_as_sequence
- NULL, // tp_as_mapping
- NULL, // tp_hash
- NULL, // tp_call
- Question_str, // tp_str
- NULL, // tp_getattro
- NULL, // tp_setattro
- NULL, // tp_as_buffer
- Py_TPFLAGS_DEFAULT, // tp_flags
- "The Question class encapsulates the common search key of DNS"
- "lookup, consisting of owner name, RR type and RR class.",
- NULL, // tp_traverse
- NULL, // tp_clear
- NULL, // tp_richcompare
- 0, // tp_weaklistoffset
- NULL, // tp_iter
- NULL, // tp_iternext
- Question_methods, // tp_methods
- NULL, // tp_members
- NULL, // tp_getset
- NULL, // tp_base
- NULL, // tp_dict
- NULL, // tp_descr_get
- NULL, // tp_descr_set
- 0, // tp_dictoffset
- (initproc)Question_init, // tp_init
- NULL, // tp_alloc
- PyType_GenericNew, // tp_new
- NULL, // tp_free
- NULL, // tp_is_gc
- NULL, // tp_bases
- NULL, // tp_mro
- NULL, // tp_cache
- NULL, // tp_subclasses
- NULL, // tp_weaklist
- NULL, // tp_del
- 0 // tp_version_tag
-};
-
static int
Question_init(s_Question* self, PyObject* args) {
// Try out the various combinations of arguments to call the
@@ -131,9 +86,9 @@ Question_init(s_Question* self, PyObject* args) {
// that if we try several like here. Otherwise the *next* python
// call will suddenly appear to throw an exception.
// (the way to do exceptions is to set PyErr and return -1)
- s_Name* name;
- s_RRClass* rrclass;
- s_RRType* rrtype;
+ PyObject* name;
+ PyObject* rrclass;
+ PyObject* rrtype;
const char* b;
Py_ssize_t len;
@@ -141,17 +96,18 @@ Question_init(s_Question* self, PyObject* args) {
try {
if (PyArg_ParseTuple(args, "O!O!O!", &name_type, &name,
- &rrclass_type, &rrclass,
- &rrtype_type, &rrtype
+ &rrclass_type, &rrclass,
+ &rrtype_type, &rrtype
)) {
- self->question = QuestionPtr(new Question(*name->cppobj, *rrclass->rrclass,
- *rrtype->rrtype));
+ self->cppobj = QuestionPtr(new Question(PyName_ToName(name),
+ PyRRClass_ToRRClass(rrclass),
+ PyRRType_ToRRType(rrtype)));
return (0);
} else if (PyArg_ParseTuple(args, "y#|I", &b, &len, &position)) {
PyErr_Clear();
InputBuffer inbuf(b, len);
inbuf.setPosition(position);
- self->question = QuestionPtr(new Question(inbuf));
+ self->cppobj = QuestionPtr(new Question(inbuf));
return (0);
}
} catch (const DNSMessageFORMERR& dmfe) {
@@ -168,7 +124,7 @@ Question_init(s_Question* self, PyObject* args) {
return (-1);
}
- self->question = QuestionPtr();
+ self->cppobj = QuestionPtr();
PyErr_Clear();
PyErr_SetString(PyExc_TypeError,
@@ -178,52 +134,62 @@ Question_init(s_Question* self, PyObject* args) {
static void
Question_destroy(s_Question* self) {
- self->question.reset();
+ self->cppobj.reset();
Py_TYPE(self)->tp_free(self);
}
static PyObject*
Question_getName(s_Question* self) {
- s_Name* name;
-
- // is this the best way to do this?
- name = static_cast<s_Name*>(name_type.tp_alloc(&name_type, 0));
- if (name != NULL) {
- name->cppobj = new Name(self->question->getName());
+ try {
+ return (createNameObject(self->cppobj->getName()));
+ } catch (const exception& ex) {
+ const string ex_what =
+ "Unexpected failure getting question Name: " +
+ string(ex.what());
+ PyErr_SetString(po_IscException, ex_what.c_str());
+ } catch (...) {
+ PyErr_SetString(PyExc_SystemError,
+ "Unexpected failure getting question Name");
}
-
- return (name);
+ return (NULL);
}
static PyObject*
Question_getType(s_Question* self) {
- s_RRType* rrtype;
-
- rrtype = static_cast<s_RRType*>(rrtype_type.tp_alloc(&rrtype_type, 0));
- if (rrtype != NULL) {
- rrtype->rrtype = new RRType(self->question->getType());
+ try {
+ return (createRRTypeObject(self->cppobj->getType()));
+ } catch (const exception& ex) {
+ const string ex_what =
+ "Unexpected failure getting question RRType: " +
+ string(ex.what());
+ PyErr_SetString(po_IscException, ex_what.c_str());
+ } catch (...) {
+ PyErr_SetString(PyExc_SystemError,
+ "Unexpected failure getting question RRType");
}
-
- return (rrtype);
+ return (NULL);
}
static PyObject*
Question_getClass(s_Question* self) {
- s_RRClass* rrclass;
-
- rrclass = static_cast<s_RRClass*>(rrclass_type.tp_alloc(&rrclass_type, 0));
- if (rrclass != NULL) {
- rrclass->rrclass = new RRClass(self->question->getClass());
+ try {
+ return (createRRClassObject(self->cppobj->getClass()));
+ } catch (const exception& ex) {
+ const string ex_what =
+ "Unexpected failure getting question RRClass: " +
+ string(ex.what());
+ PyErr_SetString(po_IscException, ex_what.c_str());
+ } catch (...) {
+ PyErr_SetString(PyExc_SystemError,
+ "Unexpected failure getting question RRClass");
}
-
- return (rrclass);
+ return (NULL);
}
-
static PyObject*
Question_toText(s_Question* self) {
// Py_BuildValue makes python objects from native data
- return (Py_BuildValue("s", self->question->toText().c_str()));
+ return (Py_BuildValue("s", self->cppobj->toText().c_str()));
}
static PyObject*
@@ -237,14 +203,14 @@ Question_str(PyObject* self) {
static PyObject*
Question_toWire(s_Question* self, PyObject* args) {
PyObject* bytes;
- s_MessageRenderer* mr;
-
+ PyObject* mr;
+
if (PyArg_ParseTuple(args, "O", &bytes) && PySequence_Check(bytes)) {
PyObject* bytes_o = bytes;
// Max length is Name::MAX_WIRE + rrclass (2) + rrtype (2)
OutputBuffer buffer(Name::MAX_WIRE + 4);
- self->question->toWire(buffer);
+ self->cppobj->toWire(buffer);
PyObject* n = PyBytes_FromStringAndSize(static_cast<const char*>(buffer.getData()),
buffer.getLength());
PyObject* result = PySequence_InPlaceConcat(bytes_o, n);
@@ -253,7 +219,7 @@ Question_toWire(s_Question* self, PyObject* args) {
Py_DECREF(n);
return (result);
} else if (PyArg_ParseTuple(args, "O!", &messagerenderer_type, &mr)) {
- self->question->toWire(*mr->messagerenderer);
+ self->cppobj->toWire(PyMessageRenderer_ToMessageRenderer(mr));
// If we return NULL it is seen as an error, so use this for
// None returns
Py_RETURN_NONE;
@@ -264,23 +230,92 @@ Question_toWire(s_Question* self, PyObject* args) {
return (NULL);
}
-// end of Question
+} // end of unnamed namespace
+
+namespace isc {
+namespace dns {
+namespace python {
+
+// This defines the complete type for reflection in python and
+// parsing of PyObject* to s_Question
+// Most of the functions are not actually implemented and NULL here.
+PyTypeObject question_type = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ "pydnspp.Question",
+ sizeof(s_Question), // tp_basicsize
+ 0, // tp_itemsize
+ (destructor)Question_destroy, // tp_dealloc
+ NULL, // tp_print
+ NULL, // tp_getattr
+ NULL, // tp_setattr
+ NULL, // tp_reserved
+ NULL, // tp_repr
+ NULL, // tp_as_number
+ NULL, // tp_as_sequence
+ NULL, // tp_as_mapping
+ NULL, // tp_hash
+ NULL, // tp_call
+ Question_str, // tp_str
+ NULL, // tp_getattro
+ NULL, // tp_setattro
+ NULL, // tp_as_buffer
+ Py_TPFLAGS_DEFAULT, // tp_flags
+ "The Question class encapsulates the common search key of DNS"
+ "lookup, consisting of owner name, RR type and RR class.",
+ NULL, // tp_traverse
+ NULL, // tp_clear
+ NULL, // tp_richcompare
+ 0, // tp_weaklistoffset
+ NULL, // tp_iter
+ NULL, // tp_iternext
+ Question_methods, // tp_methods
+ NULL, // tp_members
+ NULL, // tp_getset
+ NULL, // tp_base
+ NULL, // tp_dict
+ NULL, // tp_descr_get
+ NULL, // tp_descr_set
+ 0, // tp_dictoffset
+ (initproc)Question_init, // tp_init
+ NULL, // tp_alloc
+ PyType_GenericNew, // tp_new
+ NULL, // tp_free
+ NULL, // tp_is_gc
+ NULL, // tp_bases
+ NULL, // tp_mro
+ NULL, // tp_cache
+ NULL, // tp_subclasses
+ NULL, // tp_weaklist
+ NULL, // tp_del
+ 0 // tp_version_tag
+};
+PyObject*
+createQuestionObject(const Question& source) {
+ s_Question* question =
+ static_cast<s_Question*>(question_type.tp_alloc(&question_type, 0));
+ question->cppobj = QuestionPtr(new Question(source));
+ return (question);
+}
-// Module Initialization, all statics are initialized here
bool
-initModulePart_Question(PyObject* mod) {
- // Add the exceptions to the module
+PyQuestion_Check(PyObject* obj) {
+ if (obj == NULL) {
+ isc_throw(PyCPPWrapperException, "obj argument NULL in typecheck");
+ }
+ return (PyObject_TypeCheck(obj, &question_type));
+}
- // We initialize the static description object with PyType_Ready(),
- // then add it to the module. This is not just a check! (leaving
- // this out results in segmentation faults)
- if (PyType_Ready(&question_type) < 0) {
- return (false);
+const Question&
+PyQuestion_ToQuestion(const PyObject* question_obj) {
+ if (question_obj == NULL) {
+ isc_throw(PyCPPWrapperException,
+ "obj argument NULL in Question PyObject conversion");
}
- Py_INCREF(&question_type);
- PyModule_AddObject(mod, "Question",
- reinterpret_cast<PyObject*>(&question_type));
-
- return (true);
+ const s_Question* question = static_cast<const s_Question*>(question_obj);
+ return (*question->cppobj);
}
+
+} // end python namespace
+} // end dns namespace
+} // end isc namespace
diff --git a/src/lib/dns/python/question_python.h b/src/lib/dns/python/question_python.h
new file mode 100644
index 0000000..f5d78b1
--- /dev/null
+++ b/src/lib/dns/python/question_python.h
@@ -0,0 +1,66 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __PYTHON_QUESTION_H
+#define __PYTHON_QUESTION_H 1
+
+#include <Python.h>
+
+namespace isc {
+namespace dns {
+class Question;
+
+namespace python {
+
+extern PyObject* po_EmptyQuestion;
+
+extern PyTypeObject question_type;
+
+/// This is a simple shortcut to create a python Question object (in the
+/// form of a pointer to PyObject) with minimal exception safety.
+/// On success, it returns a valid pointer to PyObject with a reference
+/// counter of 1; if something goes wrong it throws an exception (it never
+/// returns a NULL pointer).
+/// This function is expected to be called within a try block
+/// followed by necessary setup for python exception.
+PyObject* createQuestionObject(const Question& source);
+
+/// \brief Checks if the given python object is a Question object
+///
+/// \exception PyCPPWrapperException if obj is NULL
+///
+/// \param obj The object to check the type of
+/// \return true if the object is of type Question, false otherwise
+bool PyQuestion_Check(PyObject* obj);
+
+/// \brief Returns a reference to the Question object contained within the given
+/// Python object.
+///
+/// \note The given object MUST be of type Question; this can be checked with
+/// either the right call to ParseTuple("O!"), or with PyQuestion_Check()
+///
+/// \note This is not a copy; if the Question is needed when the PyObject
+/// may be destroyed, the caller must copy it itself.
+///
+/// \param question_obj The question object to convert
+const Question& PyQuestion_ToQuestion(const PyObject* question_obj);
+
+} // namespace python
+} // namespace dns
+} // namespace isc
+#endif // __PYTHON_QUESTION_H
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/dns/python/rcode_python.cc b/src/lib/dns/python/rcode_python.cc
index b594ad3..42b48e7 100644
--- a/src/lib/dns/python/rcode_python.cc
+++ b/src/lib/dns/python/rcode_python.cc
@@ -15,34 +15,39 @@
#include <Python.h>
#include <exceptions/exceptions.h>
-
#include <dns/rcode.h>
+#include <util/python/pycppwrapper_util.h>
#include "pydnspp_common.h"
#include "rcode_python.h"
using namespace isc::dns;
using namespace isc::dns::python;
+using namespace isc::util::python;
+namespace {
+// The s_* Class simply covers one instantiation of the object.
//
-// Declaration of the custom exceptions (None for this class)
-
-//
-// Definition of the classes
-//
-
-// For each class, we need a struct, a helper functions (init, destroy,
-// and static wrappers around the methods we export), a list of methods,
-// and a type description
-
-//
-// Rcode
+// We added a helper variable static_code here
+// Since we can create Rcodes dynamically with Rcode(int), but also
+// use the static globals (Rcode::NOERROR() etc), we use this
+// variable to see if the code came from one of the latter, in which
+// case Rcode_destroy should not free it (the other option is to
+// allocate new Rcodes for every use of the static ones, but this
+// seems more efficient).
//
+// Follow-up note: we don't have to use the proxy function in the python lib;
+// we can just define class specific constants directly (see TSIGError).
+// We should make this cleanup later.
+class s_Rcode : public PyObject {
+public:
+ s_Rcode() : cppobj(NULL), static_code(false) {};
+ const Rcode* cppobj;
+ bool static_code;
+};
-// Trivial constructor.
-s_Rcode::s_Rcode() : cppobj(NULL), static_code(false) {}
+typedef CPPPyObjectContainer<s_Rcode, Rcode> RcodeContainer;
-namespace {
int Rcode_init(s_Rcode* const self, PyObject* args);
void Rcode_destroy(s_Rcode* const self);
@@ -282,7 +287,7 @@ Rcode_BADVERS(const s_Rcode*) {
return (Rcode_createStatic(Rcode::BADVERS()));
}
-PyObject*
+PyObject*
Rcode_richcmp(const s_Rcode* const self, const s_Rcode* const other,
const int op)
{
@@ -376,59 +381,31 @@ PyTypeObject rcode_type = {
0 // tp_version_tag
};
-// Module Initialization, all statics are initialized here
+PyObject*
+createRcodeObject(const Rcode& source) {
+ RcodeContainer container(PyObject_New(s_Rcode, &rcode_type));
+ container.set(new Rcode(source));
+ return (container.release());
+}
+
bool
-initModulePart_Rcode(PyObject* mod) {
- // We initialize the static description object with PyType_Ready(),
- // then add it to the module. This is not just a check! (leaving
- // this out results in segmentation faults)
- if (PyType_Ready(&rcode_type) < 0) {
- return (false);
- }
- Py_INCREF(&rcode_type);
- void* p = &rcode_type;
- if (PyModule_AddObject(mod, "Rcode", static_cast<PyObject*>(p)) != 0) {
- Py_DECREF(&rcode_type);
- return (false);
+PyRcode_Check(PyObject* obj) {
+ if (obj == NULL) {
+ isc_throw(PyCPPWrapperException, "obj argument NULL in typecheck");
}
+ return (PyObject_TypeCheck(obj, &rcode_type));
+}
- addClassVariable(rcode_type, "NOERROR_CODE",
- Py_BuildValue("h", Rcode::NOERROR_CODE));
- addClassVariable(rcode_type, "FORMERR_CODE",
- Py_BuildValue("h", Rcode::FORMERR_CODE));
- addClassVariable(rcode_type, "SERVFAIL_CODE",
- Py_BuildValue("h", Rcode::SERVFAIL_CODE));
- addClassVariable(rcode_type, "NXDOMAIN_CODE",
- Py_BuildValue("h", Rcode::NXDOMAIN_CODE));
- addClassVariable(rcode_type, "NOTIMP_CODE",
- Py_BuildValue("h", Rcode::NOTIMP_CODE));
- addClassVariable(rcode_type, "REFUSED_CODE",
- Py_BuildValue("h", Rcode::REFUSED_CODE));
- addClassVariable(rcode_type, "YXDOMAIN_CODE",
- Py_BuildValue("h", Rcode::YXDOMAIN_CODE));
- addClassVariable(rcode_type, "YXRRSET_CODE",
- Py_BuildValue("h", Rcode::YXRRSET_CODE));
- addClassVariable(rcode_type, "NXRRSET_CODE",
- Py_BuildValue("h", Rcode::NXRRSET_CODE));
- addClassVariable(rcode_type, "NOTAUTH_CODE",
- Py_BuildValue("h", Rcode::NOTAUTH_CODE));
- addClassVariable(rcode_type, "NOTZONE_CODE",
- Py_BuildValue("h", Rcode::NOTZONE_CODE));
- addClassVariable(rcode_type, "RESERVED11_CODE",
- Py_BuildValue("h", Rcode::RESERVED11_CODE));
- addClassVariable(rcode_type, "RESERVED12_CODE",
- Py_BuildValue("h", Rcode::RESERVED12_CODE));
- addClassVariable(rcode_type, "RESERVED13_CODE",
- Py_BuildValue("h", Rcode::RESERVED13_CODE));
- addClassVariable(rcode_type, "RESERVED14_CODE",
- Py_BuildValue("h", Rcode::RESERVED14_CODE));
- addClassVariable(rcode_type, "RESERVED15_CODE",
- Py_BuildValue("h", Rcode::RESERVED15_CODE));
- addClassVariable(rcode_type, "BADVERS_CODE",
- Py_BuildValue("h", Rcode::BADVERS_CODE));
-
- return (true);
+const Rcode&
+PyRcode_ToRcode(const PyObject* rcode_obj) {
+ if (rcode_obj == NULL) {
+ isc_throw(PyCPPWrapperException,
+ "obj argument NULL in Rcode PyObject conversion");
+ }
+ const s_Rcode* rcode = static_cast<const s_Rcode*>(rcode_obj);
+ return (*rcode->cppobj);
}
+
} // namespace python
} // namespace dns
} // namespace isc
diff --git a/src/lib/dns/python/rcode_python.h b/src/lib/dns/python/rcode_python.h
index 9b5e699..a149406 100644
--- a/src/lib/dns/python/rcode_python.h
+++ b/src/lib/dns/python/rcode_python.h
@@ -23,29 +23,36 @@ class Rcode;
namespace python {
-// The s_* Class simply covers one instantiation of the object.
-//
-// We added a helper variable static_code here
-// Since we can create Rcodes dynamically with Rcode(int), but also
-// use the static globals (Rcode::NOERROR() etc), we use this
-// variable to see if the code came from one of the latter, in which
-// case Rcode_destroy should not free it (the other option is to
-// allocate new Rcodes for every use of the static ones, but this
-// seems more efficient).
-//
-// Follow-up note: we don't have to use the proxy function in the python lib;
-// we can just define class specific constants directly (see TSIGError).
-// We should make this cleanup later.
-class s_Rcode : public PyObject {
-public:
- s_Rcode();
- const Rcode* cppobj;
- bool static_code;
-};
-
extern PyTypeObject rcode_type;
-bool initModulePart_Rcode(PyObject* mod);
+/// This is a simple shortcut to create a python Rcode object (in the
+/// form of a pointer to PyObject) with minimal exception safety.
+/// On success, it returns a valid pointer to PyObject with a reference
+/// counter of 1; if something goes wrong it throws an exception (it never
+/// returns a NULL pointer).
+/// This function is expected to be called within a try block
+/// followed by necessary setup for python exception.
+PyObject* createRcodeObject(const Rcode& source);
+
+/// \brief Checks if the given python object is a Rcode object
+///
+/// \exception PyCPPWrapperException if obj is NULL
+///
+/// \param obj The object to check the type of
+/// \return true if the object is of type Rcode, false otherwise
+bool PyRcode_Check(PyObject* obj);
+
+/// \brief Returns a reference to the Rcode object contained within the given
+/// Python object.
+///
+/// \note The given object MUST be of type Rcode; this can be checked with
+/// either the right call to ParseTuple("O!"), or with PyRcode_Check()
+///
+/// \note This is not a copy; if the Rcode is needed when the PyObject
+/// may be destroyed, the caller must copy it itself.
+///
+/// \param rcode_obj The rcode object to convert
+const Rcode& PyRcode_ToRcode(const PyObject* rcode_obj);
} // namespace python
} // namespace dns
diff --git a/src/lib/dns/python/rdata_python.cc b/src/lib/dns/python/rdata_python.cc
index faa4f4c..06c0263 100644
--- a/src/lib/dns/python/rdata_python.cc
+++ b/src/lib/dns/python/rdata_python.cc
@@ -12,60 +12,48 @@
// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
// PERFORMANCE OF THIS SOFTWARE.
+#define PY_SSIZE_T_CLEAN
+#include <Python.h>
#include <dns/rdata.h>
+#include <dns/messagerenderer.h>
+#include <util/buffer.h>
+#include <util/python/pycppwrapper_util.h>
+
+#include "rdata_python.h"
+#include "rrtype_python.h"
+#include "rrclass_python.h"
+#include "messagerenderer_python.h"
+
using namespace isc::dns;
+using namespace isc::dns::python;
using namespace isc::util;
+using namespace isc::util::python;
using namespace isc::dns::rdata;
-//
-// Declaration of the custom exceptions
-// Initialization and addition of these go in the initModulePart
-// function at the end of this file
-//
-static PyObject* po_InvalidRdataLength;
-static PyObject* po_InvalidRdataText;
-static PyObject* po_CharStringTooLong;
-
-//
-// Definition of the classes
-//
-
-// For each class, we need a struct, a helper functions (init, destroy,
-// and static wrappers around the methods we export), a list of methods,
-// and a type description
-
-//
-// Rdata
-//
-
-// The s_* Class simply coverst one instantiation of the object
-
-// Using a shared_ptr here should not really be necessary (PyObject
-// is already reference-counted), however internally on the cpp side,
-// not doing so might result in problems, since we can't copy construct
-// rdata field, adding them to rrsets results in a problem when the
-// rrset is destroyed later
+namespace {
class s_Rdata : public PyObject {
public:
- RdataPtr rdata;
+ isc::dns::rdata::ConstRdataPtr cppobj;
};
+typedef CPPPyObjectContainer<s_Rdata, Rdata> RdataContainer;
+
//
// We declare the functions here, the definitions are below
// the type definition of the object, since both can use the other
//
// General creation and destruction
-static int Rdata_init(s_Rdata* self, PyObject* args);
-static void Rdata_destroy(s_Rdata* self);
+int Rdata_init(s_Rdata* self, PyObject* args);
+void Rdata_destroy(s_Rdata* self);
// These are the functions we export
-static PyObject* Rdata_toText(s_Rdata* self);
+PyObject* Rdata_toText(s_Rdata* self);
// This is a second version of toText, we need one where the argument
// is a PyObject*, for the str() function in python.
-static PyObject* Rdata_str(PyObject* self);
-static PyObject* Rdata_toWire(s_Rdata* self, PyObject* args);
-static PyObject* RData_richcmp(s_Rdata* self, s_Rdata* other, int op);
+PyObject* Rdata_str(PyObject* self);
+PyObject* Rdata_toWire(s_Rdata* self, PyObject* args);
+PyObject* RData_richcmp(s_Rdata* self, s_Rdata* other, int op);
// This list contains the actual set of functions we have in
// python. Each entry has
@@ -73,7 +61,7 @@ static PyObject* RData_richcmp(s_Rdata* self, s_Rdata* other, int op);
// 2. Our static function here
// 3. Argument type
// 4. Documentation
-static PyMethodDef Rdata_methods[] = {
+PyMethodDef Rdata_methods[] = {
{ "to_text", reinterpret_cast<PyCFunction>(Rdata_toText), METH_NOARGS,
"Returns the string representation" },
{ "to_wire", reinterpret_cast<PyCFunction>(Rdata_toWire), METH_VARARGS,
@@ -86,64 +74,10 @@ static PyMethodDef Rdata_methods[] = {
{ NULL, NULL, 0, NULL }
};
-// This defines the complete type for reflection in python and
-// parsing of PyObject* to s_Rdata
-// Most of the functions are not actually implemented and NULL here.
-static PyTypeObject rdata_type = {
- PyVarObject_HEAD_INIT(NULL, 0)
- "pydnspp.Rdata",
- sizeof(s_Rdata), // tp_basicsize
- 0, // tp_itemsize
- (destructor)Rdata_destroy, // tp_dealloc
- NULL, // tp_print
- NULL, // tp_getattr
- NULL, // tp_setattr
- NULL, // tp_reserved
- NULL, // tp_repr
- NULL, // tp_as_number
- NULL, // tp_as_sequence
- NULL, // tp_as_mapping
- NULL, // tp_hash
- NULL, // tp_call
- Rdata_str, // tp_str
- NULL, // tp_getattro
- NULL, // tp_setattro
- NULL, // tp_as_buffer
- Py_TPFLAGS_DEFAULT, // tp_flags
- "The Rdata class is an abstract base class that provides "
- "a set of common interfaces to manipulate concrete RDATA objects.",
- NULL, // tp_traverse
- NULL, // tp_clear
- (richcmpfunc)RData_richcmp, // tp_richcompare
- 0, // tp_weaklistoffset
- NULL, // tp_iter
- NULL, // tp_iternext
- Rdata_methods, // tp_methods
- NULL, // tp_members
- NULL, // tp_getset
- NULL, // tp_base
- NULL, // tp_dict
- NULL, // tp_descr_get
- NULL, // tp_descr_set
- 0, // tp_dictoffset
- (initproc)Rdata_init, // tp_init
- NULL, // tp_alloc
- PyType_GenericNew, // tp_new
- NULL, // tp_free
- NULL, // tp_is_gc
- NULL, // tp_bases
- NULL, // tp_mro
- NULL, // tp_cache
- NULL, // tp_subclasses
- NULL, // tp_weaklist
- NULL, // tp_del
- 0 // tp_version_tag
-};
-
-static int
+int
Rdata_init(s_Rdata* self, PyObject* args) {
- s_RRType* rrtype;
- s_RRClass* rrclass;
+ PyObject* rrtype;
+ PyObject* rrclass;
const char* s;
const char* data;
Py_ssize_t len;
@@ -152,34 +86,36 @@ Rdata_init(s_Rdata* self, PyObject* args) {
if (PyArg_ParseTuple(args, "O!O!s", &rrtype_type, &rrtype,
&rrclass_type, &rrclass,
&s)) {
- self->rdata = createRdata(*rrtype->rrtype, *rrclass->rrclass, s);
+ self->cppobj = createRdata(PyRRType_ToRRType(rrtype),
+ PyRRClass_ToRRClass(rrclass), s);
return (0);
} else if (PyArg_ParseTuple(args, "O!O!y#", &rrtype_type, &rrtype,
&rrclass_type, &rrclass, &data, &len)) {
InputBuffer input_buffer(data, len);
- self->rdata = createRdata(*rrtype->rrtype, *rrclass->rrclass,
- input_buffer, len);
+ self->cppobj = createRdata(PyRRType_ToRRType(rrtype),
+ PyRRClass_ToRRClass(rrclass),
+ input_buffer, len);
return (0);
}
return (-1);
}
-static void
+void
Rdata_destroy(s_Rdata* self) {
// Clear the shared_ptr so that its reference count is zero
// before we call tp_free() (there is no direct release())
- self->rdata.reset();
+ self->cppobj.reset();
Py_TYPE(self)->tp_free(self);
}
-static PyObject*
+PyObject*
Rdata_toText(s_Rdata* self) {
// Py_BuildValue makes python objects from native data
- return (Py_BuildValue("s", self->rdata->toText().c_str()));
+ return (Py_BuildValue("s", self->cppobj->toText().c_str()));
}
-static PyObject*
+PyObject*
Rdata_str(PyObject* self) {
// Simply call the to_text method we already defined
return (PyObject_CallMethod(self,
@@ -187,16 +123,16 @@ Rdata_str(PyObject* self) {
const_cast<char*>("")));
}
-static PyObject*
+PyObject*
Rdata_toWire(s_Rdata* self, PyObject* args) {
PyObject* bytes;
- s_MessageRenderer* mr;
-
+ PyObject* mr;
+
if (PyArg_ParseTuple(args, "O", &bytes) && PySequence_Check(bytes)) {
PyObject* bytes_o = bytes;
-
+
OutputBuffer buffer(4);
- self->rdata->toWire(buffer);
+ self->cppobj->toWire(buffer);
PyObject* rd_bytes = PyBytes_FromStringAndSize(static_cast<const char*>(buffer.getData()), buffer.getLength());
PyObject* result = PySequence_InPlaceConcat(bytes_o, rd_bytes);
// We need to release the object we temporarily created here
@@ -204,7 +140,7 @@ Rdata_toWire(s_Rdata* self, PyObject* args) {
Py_DECREF(rd_bytes);
return (result);
} else if (PyArg_ParseTuple(args, "O!", &messagerenderer_type, &mr)) {
- self->rdata->toWire(*mr->messagerenderer);
+ self->cppobj->toWire(PyMessageRenderer_ToMessageRenderer(mr));
// If we return NULL it is seen as an error, so use this for
// None returns
Py_RETURN_NONE;
@@ -215,9 +151,7 @@ Rdata_toWire(s_Rdata* self, PyObject* args) {
return (NULL);
}
-
-
-static PyObject*
+PyObject*
RData_richcmp(s_Rdata* self, s_Rdata* other, int op) {
bool c;
@@ -229,24 +163,24 @@ RData_richcmp(s_Rdata* self, s_Rdata* other, int op) {
switch (op) {
case Py_LT:
- c = self->rdata->compare(*other->rdata) < 0;
+ c = self->cppobj->compare(*other->cppobj) < 0;
break;
case Py_LE:
- c = self->rdata->compare(*other->rdata) < 0 ||
- self->rdata->compare(*other->rdata) == 0;
+ c = self->cppobj->compare(*other->cppobj) < 0 ||
+ self->cppobj->compare(*other->cppobj) == 0;
break;
case Py_EQ:
- c = self->rdata->compare(*other->rdata) == 0;
+ c = self->cppobj->compare(*other->cppobj) == 0;
break;
case Py_NE:
- c = self->rdata->compare(*other->rdata) != 0;
+ c = self->cppobj->compare(*other->cppobj) != 0;
break;
case Py_GT:
- c = self->rdata->compare(*other->rdata) > 0;
+ c = self->cppobj->compare(*other->cppobj) > 0;
break;
case Py_GE:
- c = self->rdata->compare(*other->rdata) > 0 ||
- self->rdata->compare(*other->rdata) == 0;
+ c = self->cppobj->compare(*other->cppobj) > 0 ||
+ self->cppobj->compare(*other->cppobj) == 0;
break;
default:
PyErr_SetString(PyExc_IndexError,
@@ -258,32 +192,107 @@ RData_richcmp(s_Rdata* self, s_Rdata* other, int op) {
else
Py_RETURN_FALSE;
}
-// end of Rdata
+} // end of unnamed namespace
-// Module Initialization, all statics are initialized here
-bool
-initModulePart_Rdata(PyObject* mod) {
- // We initialize the static description object with PyType_Ready(),
- // then add it to the module. This is not just a check! (leaving
- // this out results in segmentation faults)
- if (PyType_Ready(&rdata_type) < 0) {
- return (false);
- }
- Py_INCREF(&rdata_type);
- PyModule_AddObject(mod, "Rdata",
- reinterpret_cast<PyObject*>(&rdata_type));
+namespace isc {
+namespace dns {
+namespace python {
- // Add the exceptions to the class
- po_InvalidRdataLength = PyErr_NewException("pydnspp.InvalidRdataLength", NULL, NULL);
- PyModule_AddObject(mod, "InvalidRdataLength", po_InvalidRdataLength);
- po_InvalidRdataText = PyErr_NewException("pydnspp.InvalidRdataText", NULL, NULL);
- PyModule_AddObject(mod, "InvalidRdataText", po_InvalidRdataText);
+//
+// Declaration of the custom exceptions
+// Initialization and addition of these go in the initModulePart
+// function in pydnspp
+//
+PyObject* po_InvalidRdataLength;
+PyObject* po_InvalidRdataText;
+PyObject* po_CharStringTooLong;
- po_CharStringTooLong = PyErr_NewException("pydnspp.CharStringTooLong", NULL, NULL);
- PyModule_AddObject(mod, "CharStringTooLong", po_CharStringTooLong);
+// This defines the complete type for reflection in python and
+// parsing of PyObject* to s_Rdata
+// Most of the functions are not actually implemented and NULL here.
+PyTypeObject rdata_type = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ "pydnspp.Rdata",
+ sizeof(s_Rdata), // tp_basicsize
+ 0, // tp_itemsize
+ (destructor)Rdata_destroy, // tp_dealloc
+ NULL, // tp_print
+ NULL, // tp_getattr
+ NULL, // tp_setattr
+ NULL, // tp_reserved
+ NULL, // tp_repr
+ NULL, // tp_as_number
+ NULL, // tp_as_sequence
+ NULL, // tp_as_mapping
+ NULL, // tp_hash
+ NULL, // tp_call
+ Rdata_str, // tp_str
+ NULL, // tp_getattro
+ NULL, // tp_setattro
+ NULL, // tp_as_buffer
+ Py_TPFLAGS_DEFAULT, // tp_flags
+ "The Rdata class is an abstract base class that provides "
+ "a set of common interfaces to manipulate concrete RDATA objects.",
+ NULL, // tp_traverse
+ NULL, // tp_clear
+ (richcmpfunc)RData_richcmp, // tp_richcompare
+ 0, // tp_weaklistoffset
+ NULL, // tp_iter
+ NULL, // tp_iternext
+ Rdata_methods, // tp_methods
+ NULL, // tp_members
+ NULL, // tp_getset
+ NULL, // tp_base
+ NULL, // tp_dict
+ NULL, // tp_descr_get
+ NULL, // tp_descr_set
+ 0, // tp_dictoffset
+ (initproc)Rdata_init, // tp_init
+ NULL, // tp_alloc
+ PyType_GenericNew, // tp_new
+ NULL, // tp_free
+ NULL, // tp_is_gc
+ NULL, // tp_bases
+ NULL, // tp_mro
+ NULL, // tp_cache
+ NULL, // tp_subclasses
+ NULL, // tp_weaklist
+ NULL, // tp_del
+ 0 // tp_version_tag
+};
-
- return (true);
+PyObject*
+createRdataObject(ConstRdataPtr source) {
+ s_Rdata* py_rdata =
+ static_cast<s_Rdata*>(rdata_type.tp_alloc(&rdata_type, 0));
+ if (py_rdata == NULL) {
+ isc_throw(PyCPPWrapperException, "Unexpected NULL C++ object, "
+ "probably due to short memory");
+ }
+ py_rdata->cppobj = source;
+ return (py_rdata);
}
+
+bool
+PyRdata_Check(PyObject* obj) {
+ if (obj == NULL) {
+ isc_throw(PyCPPWrapperException, "obj argument NULL in typecheck");
+ }
+ return (PyObject_TypeCheck(obj, &rdata_type));
+}
+
+const Rdata&
+PyRdata_ToRdata(const PyObject* rdata_obj) {
+ if (rdata_obj == NULL) {
+ isc_throw(PyCPPWrapperException,
+ "obj argument NULL in Rdata PyObject conversion");
+ }
+ const s_Rdata* rdata = static_cast<const s_Rdata*>(rdata_obj);
+ return (*rdata->cppobj);
+}
+
+} // end python namespace
+} // end dns namespace
+} // end isc namespace
diff --git a/src/lib/dns/python/rdata_python.h b/src/lib/dns/python/rdata_python.h
new file mode 100644
index 0000000..c7ddd57
--- /dev/null
+++ b/src/lib/dns/python/rdata_python.h
@@ -0,0 +1,68 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __PYTHON_RDATA_H
+#define __PYTHON_RDATA_H 1
+
+#include <Python.h>
+
+#include <dns/rdata.h>
+
+namespace isc {
+namespace dns {
+namespace python {
+
+extern PyObject* po_InvalidRdataLength;
+extern PyObject* po_InvalidRdataText;
+extern PyObject* po_CharStringTooLong;
+
+extern PyTypeObject rdata_type;
+
+/// This is a simple shortcut to create a python Rdata object (in the
+/// form of a pointer to PyObject) with minimal exception safety.
+/// On success, it returns a valid pointer to PyObject with a reference
+/// counter of 1; if something goes wrong it throws an exception (it never
+/// returns a NULL pointer).
+/// This function is expected to be called within a try block
+/// followed by necessary setup for python exception.
+PyObject* createRdataObject(isc::dns::rdata::ConstRdataPtr source);
+
+/// \brief Checks if the given python object is a Rdata object
+///
+/// \exception PyCPPWrapperException if obj is NULL
+///
+/// \param obj The object to check the type of
+/// \return true if the object is of type Rdata, false otherwise
+bool PyRdata_Check(PyObject* obj);
+
+/// \brief Returns a reference to the Rdata object contained within the given
+/// Python object.
+///
+/// \note The given object MUST be of type Rdata; this can be checked with
+/// either the right call to ParseTuple("O!"), or with PyRdata_Check()
+///
+/// \note This is not a copy; if the Rdata is needed when the PyObject
+/// may be destroyed, the caller must copy it itself.
+///
+/// \param rdata_obj The rdata object to convert
+const isc::dns::rdata::Rdata& PyRdata_ToRdata(const PyObject* rdata_obj);
+
+} // namespace python
+} // namespace dns
+} // namespace isc
+#endif // __PYTHON_RDATA_H
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/dns/python/rrclass_python.cc b/src/lib/dns/python/rrclass_python.cc
index 6d150c2..0014187 100644
--- a/src/lib/dns/python/rrclass_python.cc
+++ b/src/lib/dns/python/rrclass_python.cc
@@ -11,35 +11,28 @@
// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
// PERFORMANCE OF THIS SOFTWARE.
+#include <Python.h>
#include <dns/rrclass.h>
-using namespace isc::dns;
-using namespace isc::util;
-
-//
-// Declaration of the custom exceptions
-// Initialization and addition of these go in the initModulePart
-// function at the end of this file
-//
-static PyObject* po_InvalidRRClass;
-static PyObject* po_IncompleteRRClass;
-
-//
-// Definition of the classes
-//
+#include <dns/messagerenderer.h>
+#include <util/buffer.h>
+#include <util/python/pycppwrapper_util.h>
-// For each class, we need a struct, a helper functions (init, destroy,
-// and static wrappers around the methods we export), a list of methods,
-// and a type description
+#include "rrclass_python.h"
+#include "messagerenderer_python.h"
+#include "pydnspp_common.h"
-//
-// RRClass
-//
+using namespace isc::dns;
+using namespace isc::dns::python;
+using namespace isc::util;
+using namespace isc::util::python;
+namespace {
// The s_* Class simply covers one instantiation of the object
class s_RRClass : public PyObject {
public:
- RRClass* rrclass;
+ s_RRClass() : cppobj(NULL) {};
+ RRClass* cppobj;
};
//
@@ -48,25 +41,26 @@ public:
//
// General creation and destruction
-static int RRClass_init(s_RRClass* self, PyObject* args);
-static void RRClass_destroy(s_RRClass* self);
+int RRClass_init(s_RRClass* self, PyObject* args);
+void RRClass_destroy(s_RRClass* self);
// These are the functions we export
-static PyObject* RRClass_toText(s_RRClass* self);
+PyObject* RRClass_toText(s_RRClass* self);
// This is a second version of toText, we need one where the argument
// is a PyObject*, for the str() function in python.
-static PyObject* RRClass_str(PyObject* self);
-static PyObject* RRClass_toWire(s_RRClass* self, PyObject* args);
-static PyObject* RRClass_getCode(s_RRClass* self);
-static PyObject* RRClass_richcmp(s_RRClass* self, s_RRClass* other, int op);
+PyObject* RRClass_str(PyObject* self);
+PyObject* RRClass_toWire(s_RRClass* self, PyObject* args);
+PyObject* RRClass_getCode(s_RRClass* self);
+PyObject* RRClass_richcmp(s_RRClass* self, s_RRClass* other, int op);
// Static function for direct class creation
-static PyObject* RRClass_IN(s_RRClass *self);
-static PyObject* RRClass_CH(s_RRClass *self);
-static PyObject* RRClass_HS(s_RRClass *self);
-static PyObject* RRClass_NONE(s_RRClass *self);
-static PyObject* RRClass_ANY(s_RRClass *self);
+PyObject* RRClass_IN(s_RRClass *self);
+PyObject* RRClass_CH(s_RRClass *self);
+PyObject* RRClass_HS(s_RRClass *self);
+PyObject* RRClass_NONE(s_RRClass *self);
+PyObject* RRClass_ANY(s_RRClass *self);
+typedef CPPPyObjectContainer<s_RRClass, RRClass> RRClassContainer;
// This list contains the actual set of functions we have in
// python. Each entry has
@@ -74,7 +68,7 @@ static PyObject* RRClass_ANY(s_RRClass *self);
// 2. Our static function here
// 3. Argument type
// 4. Documentation
-static PyMethodDef RRClass_methods[] = {
+PyMethodDef RRClass_methods[] = {
{ "to_text", reinterpret_cast<PyCFunction>(RRClass_toText), METH_NOARGS,
"Returns the string representation" },
{ "to_wire", reinterpret_cast<PyCFunction>(RRClass_toWire), METH_VARARGS,
@@ -94,63 +88,7 @@ static PyMethodDef RRClass_methods[] = {
{ NULL, NULL, 0, NULL }
};
-// This defines the complete type for reflection in python and
-// parsing of PyObject* to s_RRClass
-// Most of the functions are not actually implemented and NULL here.
-static PyTypeObject rrclass_type = {
- PyVarObject_HEAD_INIT(NULL, 0)
- "pydnspp.RRClass",
- sizeof(s_RRClass), // tp_basicsize
- 0, // tp_itemsize
- (destructor)RRClass_destroy, // tp_dealloc
- NULL, // tp_print
- NULL, // tp_getattr
- NULL, // tp_setattr
- NULL, // tp_reserved
- NULL, // tp_repr
- NULL, // tp_as_number
- NULL, // tp_as_sequence
- NULL, // tp_as_mapping
- NULL, // tp_hash
- NULL, // tp_call
- RRClass_str, // tp_str
- NULL, // tp_getattro
- NULL, // tp_setattro
- NULL, // tp_as_buffer
- Py_TPFLAGS_DEFAULT, // tp_flags
- "The RRClass class encapsulates DNS resource record classes.\n"
- "This class manages the 16-bit integer class codes in quite a straightforward"
- "way. The only non trivial task is to handle textual representations of"
- "RR classes, such as \"IN\", \"CH\", or \"CLASS65534\".",
- NULL, // tp_traverse
- NULL, // tp_clear
- (richcmpfunc)RRClass_richcmp, // tp_richcompare
- 0, // tp_weaklistoffset
- NULL, // tp_iter
- NULL, // tp_iternext
- RRClass_methods, // tp_methods
- NULL, // tp_members
- NULL, // tp_getset
- NULL, // tp_base
- NULL, // tp_dict
- NULL, // tp_descr_get
- NULL, // tp_descr_set
- 0, // tp_dictoffset
- (initproc)RRClass_init, // tp_init
- NULL, // tp_alloc
- PyType_GenericNew, // tp_new
- NULL, // tp_free
- NULL, // tp_is_gc
- NULL, // tp_bases
- NULL, // tp_mro
- NULL, // tp_cache
- NULL, // tp_subclasses
- NULL, // tp_weaklist
- NULL, // tp_del
- 0 // tp_version_tag
-};
-
-static int
+int
RRClass_init(s_RRClass* self, PyObject* args) {
const char* s;
long i;
@@ -164,7 +102,7 @@ RRClass_init(s_RRClass* self, PyObject* args) {
// (the way to do exceptions is to set PyErr and return -1)
try {
if (PyArg_ParseTuple(args, "s", &s)) {
- self->rrclass = new RRClass(s);
+ self->cppobj = new RRClass(s);
return (0);
} else if (PyArg_ParseTuple(args, "l", &i)) {
if (i < 0 || i > 0xffff) {
@@ -173,7 +111,7 @@ RRClass_init(s_RRClass* self, PyObject* args) {
"RR class number out of range");
return (-1);
}
- self->rrclass = new RRClass(i);
+ self->cppobj = new RRClass(i);
return (0);
} else if (PyArg_ParseTuple(args, "O", &bytes) && PySequence_Check(bytes)) {
uint8_t data[2];
@@ -182,7 +120,7 @@ RRClass_init(s_RRClass* self, PyObject* args) {
return (result);
}
InputBuffer ib(data, 2);
- self->rrclass = new RRClass(ib);
+ self->cppobj = new RRClass(ib);
PyErr_Clear();
return (0);
}
@@ -199,20 +137,20 @@ RRClass_init(s_RRClass* self, PyObject* args) {
return (-1);
}
-static void
+void
RRClass_destroy(s_RRClass* self) {
- delete self->rrclass;
- self->rrclass = NULL;
+ delete self->cppobj;
+ self->cppobj = NULL;
Py_TYPE(self)->tp_free(self);
}
-static PyObject*
+PyObject*
RRClass_toText(s_RRClass* self) {
// Py_BuildValue makes python objects from native data
- return (Py_BuildValue("s", self->rrclass->toText().c_str()));
+ return (Py_BuildValue("s", self->cppobj->toText().c_str()));
}
-static PyObject*
+PyObject*
RRClass_str(PyObject* self) {
// Simply call the to_text method we already defined
return (PyObject_CallMethod(self,
@@ -220,16 +158,16 @@ RRClass_str(PyObject* self) {
const_cast<char*>("")));
}
-static PyObject*
+PyObject*
RRClass_toWire(s_RRClass* self, PyObject* args) {
PyObject* bytes;
- s_MessageRenderer* mr;
-
+ PyObject* mr;
+
if (PyArg_ParseTuple(args, "O", &bytes) && PySequence_Check(bytes)) {
PyObject* bytes_o = bytes;
-
+
OutputBuffer buffer(2);
- self->rrclass->toWire(buffer);
+ self->cppobj->toWire(buffer);
PyObject* n = PyBytes_FromStringAndSize(static_cast<const char*>(buffer.getData()), buffer.getLength());
PyObject* result = PySequence_InPlaceConcat(bytes_o, n);
// We need to release the object we temporarily created here
@@ -237,7 +175,7 @@ RRClass_toWire(s_RRClass* self, PyObject* args) {
Py_DECREF(n);
return (result);
} else if (PyArg_ParseTuple(args, "O!", &messagerenderer_type, &mr)) {
- self->rrclass->toWire(*mr->messagerenderer);
+ self->cppobj->toWire(PyMessageRenderer_ToMessageRenderer(mr));
// If we return NULL it is seen as an error, so use this for
// None returns
Py_RETURN_NONE;
@@ -248,12 +186,12 @@ RRClass_toWire(s_RRClass* self, PyObject* args) {
return (NULL);
}
-static PyObject*
+PyObject*
RRClass_getCode(s_RRClass* self) {
- return (Py_BuildValue("I", self->rrclass->getCode()));
+ return (Py_BuildValue("I", self->cppobj->getCode()));
}
-static PyObject*
+PyObject*
RRClass_richcmp(s_RRClass* self, s_RRClass* other, int op) {
bool c;
@@ -265,24 +203,24 @@ RRClass_richcmp(s_RRClass* self, s_RRClass* other, int op) {
switch (op) {
case Py_LT:
- c = *self->rrclass < *other->rrclass;
+ c = *self->cppobj < *other->cppobj;
break;
case Py_LE:
- c = *self->rrclass < *other->rrclass ||
- *self->rrclass == *other->rrclass;
+ c = *self->cppobj < *other->cppobj ||
+ *self->cppobj == *other->cppobj;
break;
case Py_EQ:
- c = *self->rrclass == *other->rrclass;
+ c = *self->cppobj == *other->cppobj;
break;
case Py_NE:
- c = *self->rrclass != *other->rrclass;
+ c = *self->cppobj != *other->cppobj;
break;
case Py_GT:
- c = *other->rrclass < *self->rrclass;
+ c = *other->cppobj < *self->cppobj;
break;
case Py_GE:
- c = *other->rrclass < *self->rrclass ||
- *self->rrclass == *other->rrclass;
+ c = *other->cppobj < *self->cppobj ||
+ *self->cppobj == *other->cppobj;
break;
default:
PyErr_SetString(PyExc_IndexError,
@@ -298,56 +236,131 @@ RRClass_richcmp(s_RRClass* self, s_RRClass* other, int op) {
//
// Common function for RRClass_IN/CH/etc.
//
-static PyObject* RRClass_createStatic(RRClass stc) {
+PyObject* RRClass_createStatic(RRClass stc) {
s_RRClass* ret = PyObject_New(s_RRClass, &rrclass_type);
if (ret != NULL) {
- ret->rrclass = new RRClass(stc);
+ ret->cppobj = new RRClass(stc);
}
return (ret);
}
-static PyObject* RRClass_IN(s_RRClass*) {
+PyObject* RRClass_IN(s_RRClass*) {
return (RRClass_createStatic(RRClass::IN()));
}
-static PyObject* RRClass_CH(s_RRClass*) {
+PyObject* RRClass_CH(s_RRClass*) {
return (RRClass_createStatic(RRClass::CH()));
}
-static PyObject* RRClass_HS(s_RRClass*) {
+PyObject* RRClass_HS(s_RRClass*) {
return (RRClass_createStatic(RRClass::HS()));
}
-static PyObject* RRClass_NONE(s_RRClass*) {
+PyObject* RRClass_NONE(s_RRClass*) {
return (RRClass_createStatic(RRClass::NONE()));
}
-static PyObject* RRClass_ANY(s_RRClass*) {
+PyObject* RRClass_ANY(s_RRClass*) {
return (RRClass_createStatic(RRClass::ANY()));
}
-// end of RRClass
+
+} // end anonymous namespace
+
+namespace isc {
+namespace dns {
+namespace python {
+
+//
+// Declaration of the custom exceptions
+// Initialization and addition of these go in the initModulePart
+// function in pydnspp.cc
+//
+PyObject* po_InvalidRRClass;
+PyObject* po_IncompleteRRClass;
+
+
+// This defines the complete type for reflection in python and
+// parsing of PyObject* to s_RRClass
+// Most of the functions are not actually implemented and NULL here.
+PyTypeObject rrclass_type = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ "pydnspp.RRClass",
+ sizeof(s_RRClass), // tp_basicsize
+ 0, // tp_itemsize
+ (destructor)RRClass_destroy, // tp_dealloc
+ NULL, // tp_print
+ NULL, // tp_getattr
+ NULL, // tp_setattr
+ NULL, // tp_reserved
+ NULL, // tp_repr
+ NULL, // tp_as_number
+ NULL, // tp_as_sequence
+ NULL, // tp_as_mapping
+ NULL, // tp_hash
+ NULL, // tp_call
+ RRClass_str, // tp_str
+ NULL, // tp_getattro
+ NULL, // tp_setattro
+ NULL, // tp_as_buffer
+ Py_TPFLAGS_DEFAULT, // tp_flags
+ "The RRClass class encapsulates DNS resource record classes.\n"
+ "This class manages the 16-bit integer class codes in quite a straightforward"
+ "way. The only non trivial task is to handle textual representations of"
+ "RR classes, such as \"IN\", \"CH\", or \"CLASS65534\".",
+ NULL, // tp_traverse
+ NULL, // tp_clear
+ (richcmpfunc)RRClass_richcmp, // tp_richcompare
+ 0, // tp_weaklistoffset
+ NULL, // tp_iter
+ NULL, // tp_iternext
+ RRClass_methods, // tp_methods
+ NULL, // tp_members
+ NULL, // tp_getset
+ NULL, // tp_base
+ NULL, // tp_dict
+ NULL, // tp_descr_get
+ NULL, // tp_descr_set
+ 0, // tp_dictoffset
+ (initproc)RRClass_init, // tp_init
+ NULL, // tp_alloc
+ PyType_GenericNew, // tp_new
+ NULL, // tp_free
+ NULL, // tp_is_gc
+ NULL, // tp_bases
+ NULL, // tp_mro
+ NULL, // tp_cache
+ NULL, // tp_subclasses
+ NULL, // tp_weaklist
+ NULL, // tp_del
+ 0 // tp_version_tag
+};
+
+PyObject*
+createRRClassObject(const RRClass& source) {
+ RRClassContainer container(PyObject_New(s_RRClass, &rrclass_type));
+ container.set(new RRClass(source));
+ return (container.release());
+}
-// Module Initialization, all statics are initialized here
bool
-initModulePart_RRClass(PyObject* mod) {
- // Add the exceptions to the module
- po_InvalidRRClass = PyErr_NewException("pydnspp.InvalidRRClass", NULL, NULL);
- Py_INCREF(po_InvalidRRClass);
- PyModule_AddObject(mod, "InvalidRRClass", po_InvalidRRClass);
- po_IncompleteRRClass = PyErr_NewException("pydnspp.IncompleteRRClass", NULL, NULL);
- Py_INCREF(po_IncompleteRRClass);
- PyModule_AddObject(mod, "IncompleteRRClass", po_IncompleteRRClass);
-
- // We initialize the static description object with PyType_Ready(),
- // then add it to the module. This is not just a check! (leaving
- // this out results in segmentation faults)
- if (PyType_Ready(&rrclass_type) < 0) {
- return (false);
+PyRRClass_Check(PyObject* obj) {
+ if (obj == NULL) {
+ isc_throw(PyCPPWrapperException, "obj argument NULL in typecheck");
}
- Py_INCREF(&rrclass_type);
- PyModule_AddObject(mod, "RRClass",
- reinterpret_cast<PyObject*>(&rrclass_type));
-
- return (true);
+ return (PyObject_TypeCheck(obj, &rrclass_type));
}
+
+const RRClass&
+PyRRClass_ToRRClass(const PyObject* rrclass_obj) {
+ if (rrclass_obj == NULL) {
+ isc_throw(PyCPPWrapperException,
+ "obj argument NULL in RRClass PyObject conversion");
+ }
+ const s_RRClass* rrclass = static_cast<const s_RRClass*>(rrclass_obj);
+ return (*rrclass->cppobj);
+}
+
+} // end namespace python
+} // end namespace dns
+} // end namespace isc
diff --git a/src/lib/dns/python/rrclass_python.h b/src/lib/dns/python/rrclass_python.h
new file mode 100644
index 0000000..f58bba6
--- /dev/null
+++ b/src/lib/dns/python/rrclass_python.h
@@ -0,0 +1,68 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __PYTHON_RRCLASS_H
+#define __PYTHON_RRCLASS_H 1
+
+#include <Python.h>
+
+namespace isc {
+namespace dns {
+class RRClass;
+
+namespace python {
+
+extern PyObject* po_InvalidRRClass;
+extern PyObject* po_IncompleteRRClass;
+
+extern PyTypeObject rrclass_type;
+
+/// This is a simple shortcut to create a python RRClass object (in the
+/// form of a pointer to PyObject) with minimal exception safety.
+/// On success, it returns a valid pointer to PyObject with a reference
+/// counter of 1; if something goes wrong it throws an exception (it never
+/// returns a NULL pointer).
+/// This function is expected to be called within a try block
+/// followed by necessary setup for python exception.
+PyObject* createRRClassObject(const RRClass& source);
+
+/// \brief Checks if the given python object is a RRClass object
+///
+/// \exception PyCPPWrapperException if obj is NULL
+///
+/// \param obj The object to check the type of
+/// \return true if the object is of type RRClass, false otherwise
+bool PyRRClass_Check(PyObject* obj);
+
+/// \brief Returns a reference to the RRClass object contained within the given
+/// Python object.
+///
+/// \note The given object MUST be of type RRClass; this can be checked with
+/// either the right call to ParseTuple("O!"), or with PyRRClass_Check()
+///
+/// \note This is not a copy; if the RRClass is needed when the PyObject
+/// may be destroyed, the caller must copy it itself.
+///
+/// \param rrclass_obj The rrclass object to convert
+const RRClass& PyRRClass_ToRRClass(const PyObject* rrclass_obj);
+
+
+} // namespace python
+} // namespace dns
+} // namespace isc
+#endif // __PYTHON_RRCLASS_H
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/dns/python/rrset_python.cc b/src/lib/dns/python/rrset_python.cc
index 71a0710..9fc3d79 100644
--- a/src/lib/dns/python/rrset_python.cc
+++ b/src/lib/dns/python/rrset_python.cc
@@ -12,55 +12,63 @@
// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
// PERFORMANCE OF THIS SOFTWARE.
-#include <dns/rrset.h>
+#include <Python.h>
-//
-// Declaration of the custom exceptions
-// Initialization and addition of these go in the module init at the
-// end
-//
-static PyObject* po_EmptyRRset;
+#include <util/python/pycppwrapper_util.h>
-//
-// Definition of the classes
-//
-
-// For each class, we need a struct, a helper functions (init, destroy,
-// and static wrappers around the methods we export), a list of methods,
-// and a type description
+#include <dns/rrset.h>
+#include <dns/name.h>
+#include <dns/messagerenderer.h>
+
+#include "name_python.h"
+#include "pydnspp_common.h"
+#include "rrset_python.h"
+#include "rrclass_python.h"
+#include "rrtype_python.h"
+#include "rrttl_python.h"
+#include "rdata_python.h"
+#include "messagerenderer_python.h"
+
+using namespace std;
using namespace isc::dns;
+using namespace isc::dns::python;
using namespace isc::util;
+using namespace isc::util::python;
+
+namespace {
-// RRset
+// The s_* Class simply covers one instantiation of the object
// Using a shared_ptr here should not really be necessary (PyObject
// is already reference-counted), however internally on the cpp side,
// not doing so might result in problems, since we can't copy construct
-// rrsets, adding them to messages results in a problem when the
-// message is destroyed or cleared later
+// rdata fields, adding them to rrsets results in a problem when the
+// rrset is destroyed later
class s_RRset : public PyObject {
public:
- RRsetPtr rrset;
+ isc::dns::RRsetPtr cppobj;
};
-static int RRset_init(s_RRset* self, PyObject* args);
-static void RRset_destroy(s_RRset* self);
-
-static PyObject* RRset_getRdataCount(s_RRset* self);
-static PyObject* RRset_getName(s_RRset* self);
-static PyObject* RRset_getClass(s_RRset* self);
-static PyObject* RRset_getType(s_RRset* self);
-static PyObject* RRset_getTTL(s_RRset* self);
-static PyObject* RRset_setName(s_RRset* self, PyObject* args);
-static PyObject* RRset_setTTL(s_RRset* self, PyObject* args);
-static PyObject* RRset_toText(s_RRset* self);
-static PyObject* RRset_str(PyObject* self);
-static PyObject* RRset_toWire(s_RRset* self, PyObject* args);
-static PyObject* RRset_addRdata(s_RRset* self, PyObject* args);
-static PyObject* RRset_getRdata(s_RRset* self);
+int RRset_init(s_RRset* self, PyObject* args);
+void RRset_destroy(s_RRset* self);
+
+PyObject* RRset_getRdataCount(s_RRset* self);
+PyObject* RRset_getName(s_RRset* self);
+PyObject* RRset_getClass(s_RRset* self);
+PyObject* RRset_getType(s_RRset* self);
+PyObject* RRset_getTTL(s_RRset* self);
+PyObject* RRset_setName(s_RRset* self, PyObject* args);
+PyObject* RRset_setTTL(s_RRset* self, PyObject* args);
+PyObject* RRset_toText(s_RRset* self);
+PyObject* RRset_str(PyObject* self);
+PyObject* RRset_toWire(s_RRset* self, PyObject* args);
+PyObject* RRset_addRdata(s_RRset* self, PyObject* args);
+PyObject* RRset_getRdata(s_RRset* self);
+PyObject* RRset_removeRRsig(s_RRset* self);
+
// TODO: iterator?
-static PyMethodDef RRset_methods[] = {
+PyMethodDef RRset_methods[] = {
{ "get_rdata_count", reinterpret_cast<PyCFunction>(RRset_getRdataCount), METH_NOARGS,
"Returns the number of rdata fields." },
{ "get_name", reinterpret_cast<PyCFunction>(RRset_getName), METH_NOARGS,
@@ -88,208 +96,142 @@ static PyMethodDef RRset_methods[] = {
"Adds the rdata for one RR to the RRset.\nTakes an Rdata object as an argument" },
{ "get_rdata", reinterpret_cast<PyCFunction>(RRset_getRdata), METH_NOARGS,
"Returns a List containing all Rdata elements" },
+ { "remove_rrsig", reinterpret_cast<PyCFunction>(RRset_removeRRsig), METH_NOARGS,
+ "Clears the list of RRsigs for this RRset" },
{ NULL, NULL, 0, NULL }
};
-static PyTypeObject rrset_type = {
- PyVarObject_HEAD_INIT(NULL, 0)
- "pydnspp.RRset",
- sizeof(s_RRset), // tp_basicsize
- 0, // tp_itemsize
- (destructor)RRset_destroy, // tp_dealloc
- NULL, // tp_print
- NULL, // tp_getattr
- NULL, // tp_setattr
- NULL, // tp_reserved
- NULL, // tp_repr
- NULL, // tp_as_number
- NULL, // tp_as_sequence
- NULL, // tp_as_mapping
- NULL, // tp_hash
- NULL, // tp_call
- RRset_str, // tp_str
- NULL, // tp_getattro
- NULL, // tp_setattro
- NULL, // tp_as_buffer
- Py_TPFLAGS_DEFAULT, // tp_flags
- "The AbstractRRset class is an abstract base class that "
- "models a DNS RRset.\n\n"
- "An object of (a specific derived class of) AbstractRRset "
- "models an RRset as described in the DNS standard:\n"
- "A set of DNS resource records (RRs) of the same type and class. "
- "The standard requires the TTL of all RRs in an RRset be the same; "
- "this class follows that requirement.\n\n"
- "Note about duplicate RDATA: RFC2181 states that it's meaningless that an "
- "RRset contains two identical RRs and that name servers should suppress "
- "such duplicates.\n"
- "This class is not responsible for ensuring this requirement: For example, "
- "addRdata() method doesn't check if there's already RDATA identical "
- "to the one being added.\n"
- "This is because such checks can be expensive, and it's often easy to "
- "ensure the uniqueness requirement at the %data preparation phase "
- "(e.g. when loading a zone).",
- NULL, // tp_traverse
- NULL, // tp_clear
- NULL, // tp_richcompare
- 0, // tp_weaklistoffset
- NULL, // tp_iter
- NULL, // tp_iternext
- RRset_methods, // tp_methods
- NULL, // tp_members
- NULL, // tp_getset
- NULL, // tp_base
- NULL, // tp_dict
- NULL, // tp_descr_get
- NULL, // tp_descr_set
- 0, // tp_dictoffset
- (initproc)RRset_init, // tp_init
- NULL, // tp_alloc
- PyType_GenericNew, // tp_new
- NULL, // tp_free
- NULL, // tp_is_gc
- NULL, // tp_bases
- NULL, // tp_mro
- NULL, // tp_cache
- NULL, // tp_subclasses
- NULL, // tp_weaklist
- NULL, // tp_del
- 0 // tp_version_tag
-};
-
-static int
+int
RRset_init(s_RRset* self, PyObject* args) {
- s_Name* name;
- s_RRClass* rrclass;
- s_RRType* rrtype;
- s_RRTTL* rrttl;
+ PyObject* name;
+ PyObject* rrclass;
+ PyObject* rrtype;
+ PyObject* rrttl;
if (PyArg_ParseTuple(args, "O!O!O!O!", &name_type, &name,
&rrclass_type, &rrclass,
&rrtype_type, &rrtype,
&rrttl_type, &rrttl
)) {
- self->rrset = RRsetPtr(new RRset(*name->cppobj, *rrclass->rrclass,
- *rrtype->rrtype, *rrttl->rrttl));
+ self->cppobj = RRsetPtr(new RRset(PyName_ToName(name),
+ PyRRClass_ToRRClass(rrclass),
+ PyRRType_ToRRType(rrtype),
+ PyRRTTL_ToRRTTL(rrttl)));
return (0);
}
- self->rrset = RRsetPtr();
+ self->cppobj = RRsetPtr();
return (-1);
}
-static void
+void
RRset_destroy(s_RRset* self) {
// Clear the shared_ptr so that its reference count is zero
// before we call tp_free() (there is no direct release())
- self->rrset.reset();
+ self->cppobj.reset();
Py_TYPE(self)->tp_free(self);
}
-static PyObject*
+PyObject*
RRset_getRdataCount(s_RRset* self) {
- return (Py_BuildValue("I", self->rrset->getRdataCount()));
+ return (Py_BuildValue("I", self->cppobj->getRdataCount()));
}
-static PyObject*
+PyObject*
RRset_getName(s_RRset* self) {
- s_Name* name;
-
- // is this the best way to do this?
- name = static_cast<s_Name*>(name_type.tp_alloc(&name_type, 0));
- if (name != NULL) {
- name->cppobj = new Name(self->rrset->getName());
- if (name->cppobj == NULL)
- {
- Py_DECREF(name);
- return (NULL);
- }
+ try {
+ return (createNameObject(self->cppobj->getName()));
+ } catch (const exception& ex) {
+ const string ex_what =
+ "Unexpected failure getting rrset Name: " +
+ string(ex.what());
+ PyErr_SetString(po_IscException, ex_what.c_str());
+ } catch (...) {
+ PyErr_SetString(PyExc_SystemError,
+ "Unexpected failure getting rrset Name");
}
-
- return (name);
+ return (NULL);
}
-static PyObject*
+PyObject*
RRset_getClass(s_RRset* self) {
- s_RRClass* rrclass;
-
- rrclass = static_cast<s_RRClass*>(rrclass_type.tp_alloc(&rrclass_type, 0));
- if (rrclass != NULL) {
- rrclass->rrclass = new RRClass(self->rrset->getClass());
- if (rrclass->rrclass == NULL)
- {
- Py_DECREF(rrclass);
- return (NULL);
- }
+ try {
+ return (createRRClassObject(self->cppobj->getClass()));
+ } catch (const exception& ex) {
+ const string ex_what =
+ "Unexpected failure getting question RRClass: " +
+ string(ex.what());
+ PyErr_SetString(po_IscException, ex_what.c_str());
+ } catch (...) {
+ PyErr_SetString(PyExc_SystemError,
+ "Unexpected failure getting question RRClass");
}
-
- return (rrclass);
+ return (NULL);
}
-static PyObject*
+PyObject*
RRset_getType(s_RRset* self) {
- s_RRType* rrtype;
-
- rrtype = static_cast<s_RRType*>(rrtype_type.tp_alloc(&rrtype_type, 0));
- if (rrtype != NULL) {
- rrtype->rrtype = new RRType(self->rrset->getType());
- if (rrtype->rrtype == NULL)
- {
- Py_DECREF(rrtype);
- return (NULL);
- }
+ try {
+ return (createRRTypeObject(self->cppobj->getType()));
+ } catch (const exception& ex) {
+ const string ex_what =
+ "Unexpected failure getting question RRType: " +
+ string(ex.what());
+ PyErr_SetString(po_IscException, ex_what.c_str());
+ } catch (...) {
+ PyErr_SetString(PyExc_SystemError,
+ "Unexpected failure getting question RRType");
}
-
- return (rrtype);
+ return (NULL);
}
-static PyObject*
+PyObject*
RRset_getTTL(s_RRset* self) {
- s_RRTTL* rrttl;
-
- rrttl = static_cast<s_RRTTL*>(rrttl_type.tp_alloc(&rrttl_type, 0));
- if (rrttl != NULL) {
- rrttl->rrttl = new RRTTL(self->rrset->getTTL());
- if (rrttl->rrttl == NULL)
- {
- Py_DECREF(rrttl);
- return (NULL);
- }
+ try {
+ return (createRRTTLObject(self->cppobj->getTTL()));
+ } catch (const exception& ex) {
+ const string ex_what =
+ "Unexpected failure getting question TTL: " +
+ string(ex.what());
+ PyErr_SetString(po_IscException, ex_what.c_str());
+ } catch (...) {
+ PyErr_SetString(PyExc_SystemError,
+ "Unexpected failure getting question TTL");
}
-
- return (rrttl);
+ return (NULL);
}
-static PyObject*
+PyObject*
RRset_setName(s_RRset* self, PyObject* args) {
- s_Name* name;
+ PyObject* name;
if (!PyArg_ParseTuple(args, "O!", &name_type, &name)) {
return (NULL);
}
- self->rrset->setName(*name->cppobj);
+ self->cppobj->setName(PyName_ToName(name));
Py_RETURN_NONE;
}
-static PyObject*
+PyObject*
RRset_setTTL(s_RRset* self, PyObject* args) {
- s_RRTTL* rrttl;
+ PyObject* rrttl;
if (!PyArg_ParseTuple(args, "O!", &rrttl_type, &rrttl)) {
return (NULL);
}
- self->rrset->setTTL(*rrttl->rrttl);
+ self->cppobj->setTTL(PyRRTTL_ToRRTTL(rrttl));
Py_RETURN_NONE;
}
-static PyObject*
+PyObject*
RRset_toText(s_RRset* self) {
try {
- return (Py_BuildValue("s", self->rrset->toText().c_str()));
+ return (Py_BuildValue("s", self->cppobj->toText().c_str()));
} catch (const EmptyRRset& ers) {
PyErr_SetString(po_EmptyRRset, ers.what());
return (NULL);
}
}
-static PyObject*
+PyObject*
RRset_str(PyObject* self) {
// Simply call the to_text method we already defined
return (PyObject_CallMethod(self,
@@ -297,17 +239,17 @@ RRset_str(PyObject* self) {
const_cast<char*>("")));
}
-static PyObject*
+PyObject*
RRset_toWire(s_RRset* self, PyObject* args) {
PyObject* bytes;
- s_MessageRenderer* mr;
+ PyObject* mr;
try {
if (PyArg_ParseTuple(args, "O", &bytes) && PySequence_Check(bytes)) {
PyObject* bytes_o = bytes;
-
+
OutputBuffer buffer(4096);
- self->rrset->toWire(buffer);
+ self->cppobj->toWire(buffer);
PyObject* n = PyBytes_FromStringAndSize(static_cast<const char*>(buffer.getData()), buffer.getLength());
PyObject* result = PySequence_InPlaceConcat(bytes_o, n);
// We need to release the object we temporarily created here
@@ -315,7 +257,7 @@ RRset_toWire(s_RRset* self, PyObject* args) {
Py_DECREF(n);
return (result);
} else if (PyArg_ParseTuple(args, "O!", &messagerenderer_type, &mr)) {
- self->rrset->toWire(*mr->messagerenderer);
+ self->cppobj->toWire(PyMessageRenderer_ToMessageRenderer(mr));
// If we return NULL it is seen as an error, so use this for
// None returns
Py_RETURN_NONE;
@@ -331,14 +273,14 @@ RRset_toWire(s_RRset* self, PyObject* args) {
return (NULL);
}
-static PyObject*
+PyObject*
RRset_addRdata(s_RRset* self, PyObject* args) {
- s_Rdata* rdata;
+ PyObject* rdata;
if (!PyArg_ParseTuple(args, "O!", &rdata_type, &rdata)) {
return (NULL);
}
try {
- self->rrset->addRdata(*rdata->rdata);
+ self->cppobj->addRdata(PyRdata_ToRdata(rdata));
Py_RETURN_NONE;
} catch (const std::bad_cast&) {
PyErr_Clear();
@@ -348,55 +290,173 @@ RRset_addRdata(s_RRset* self, PyObject* args) {
}
}
-static PyObject*
+PyObject*
RRset_getRdata(s_RRset* self) {
PyObject* list = PyList_New(0);
- RdataIteratorPtr it = self->rrset->getRdataIterator();
-
- for (; !it->isLast(); it->next()) {
- s_Rdata *rds = static_cast<s_Rdata*>(rdata_type.tp_alloc(&rdata_type, 0));
- if (rds != NULL) {
- // hmz them iterators/shared_ptrs and private constructors
- // make this a bit weird, so we create a new one with
- // the data available
- const Rdata *rd = &it->getCurrent();
- rds->rdata = createRdata(self->rrset->getType(), self->rrset->getClass(), *rd);
- PyList_Append(list, rds);
- } else {
- return (NULL);
+ RdataIteratorPtr it = self->cppobj->getRdataIterator();
+
+ try {
+ for (; !it->isLast(); it->next()) {
+ const rdata::Rdata *rd = &it->getCurrent();
+ if (PyList_Append(list,
+ createRdataObject(createRdata(self->cppobj->getType(),
+ self->cppobj->getClass(), *rd))) == -1) {
+ Py_DECREF(list);
+ return (NULL);
+ }
}
+ return (list);
+ } catch (const exception& ex) {
+ const string ex_what =
+ "Unexpected failure getting rrset Rdata: " +
+ string(ex.what());
+ PyErr_SetString(po_IscException, ex_what.c_str());
+ } catch (...) {
+ PyErr_SetString(PyExc_SystemError,
+ "Unexpected failure getting rrset Rdata");
}
-
- return (list);
+ Py_DECREF(list);
+ return (NULL);
}
-// end of RRset
+PyObject*
+RRset_removeRRsig(s_RRset* self) {
+ self->cppobj->removeRRsig();
+ Py_RETURN_NONE;
+}
+} // end of unnamed namespace
-// Module Initialization, all statics are initialized here
-bool
-initModulePart_RRset(PyObject* mod) {
- // Add the exceptions to the module
- po_EmptyRRset = PyErr_NewException("pydnspp.EmptyRRset", NULL, NULL);
- PyModule_AddObject(mod, "EmptyRRset", po_EmptyRRset);
+namespace isc {
+namespace dns {
+namespace python {
- // Add the enums to the module
+//
+// Declaration of the custom exceptions
+// Initialization and addition of these go in the module init at the
+// end
+//
+PyObject* po_EmptyRRset;
- // Add the constants to the module
+PyTypeObject rrset_type = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ "pydnspp.RRset",
+ sizeof(s_RRset), // tp_basicsize
+ 0, // tp_itemsize
+ (destructor)RRset_destroy, // tp_dealloc
+ NULL, // tp_print
+ NULL, // tp_getattr
+ NULL, // tp_setattr
+ NULL, // tp_reserved
+ NULL, // tp_repr
+ NULL, // tp_as_number
+ NULL, // tp_as_sequence
+ NULL, // tp_as_mapping
+ NULL, // tp_hash
+ NULL, // tp_call
+ RRset_str, // tp_str
+ NULL, // tp_getattro
+ NULL, // tp_setattro
+ NULL, // tp_as_buffer
+ Py_TPFLAGS_DEFAULT, // tp_flags
+ "The AbstractRRset class is an abstract base class that "
+ "models a DNS RRset.\n\n"
+ "An object of (a specific derived class of) AbstractRRset "
+ "models an RRset as described in the DNS standard:\n"
+ "A set of DNS resource records (RRs) of the same type and class. "
+ "The standard requires the TTL of all RRs in an RRset be the same; "
+ "this class follows that requirement.\n\n"
+ "Note about duplicate RDATA: RFC2181 states that it's meaningless that an "
+ "RRset contains two identical RRs and that name servers should suppress "
+ "such duplicates.\n"
+ "This class is not responsible for ensuring this requirement: For example, "
+ "addRdata() method doesn't check if there's already RDATA identical "
+ "to the one being added.\n"
+ "This is because such checks can be expensive, and it's often easy to "
+ "ensure the uniqueness requirement at the %data preparation phase "
+ "(e.g. when loading a zone).",
+ NULL, // tp_traverse
+ NULL, // tp_clear
+ NULL, // tp_richcompare
+ 0, // tp_weaklistoffset
+ NULL, // tp_iter
+ NULL, // tp_iternext
+ RRset_methods, // tp_methods
+ NULL, // tp_members
+ NULL, // tp_getset
+ NULL, // tp_base
+ NULL, // tp_dict
+ NULL, // tp_descr_get
+ NULL, // tp_descr_set
+ 0, // tp_dictoffset
+ (initproc)RRset_init, // tp_init
+ NULL, // tp_alloc
+ PyType_GenericNew, // tp_new
+ NULL, // tp_free
+ NULL, // tp_is_gc
+ NULL, // tp_bases
+ NULL, // tp_mro
+ NULL, // tp_cache
+ NULL, // tp_subclasses
+ NULL, // tp_weaklist
+ NULL, // tp_del
+ 0 // tp_version_tag
+};
+
+PyObject*
+createRRsetObject(const RRset& source) {
- // Add the classes to the module
- // We initialize the static description object with PyType_Ready(),
- // then add it to the module
+ // RRsets are noncopyable, so as a workaround we recreate a new one
+ // and copy over all content
+ RRsetPtr new_rrset = isc::dns::RRsetPtr(
+ new isc::dns::RRset(source.getName(), source.getClass(),
+ source.getType(), source.getTTL()));
- // NameComparisonResult
- if (PyType_Ready(&rrset_type) < 0) {
- return (false);
+ isc::dns::RdataIteratorPtr rdata_it(source.getRdataIterator());
+ for (rdata_it->first(); !rdata_it->isLast(); rdata_it->next()) {
+ new_rrset->addRdata(rdata_it->getCurrent());
+ }
+
+ isc::dns::RRsetPtr sigs = source.getRRsig();
+ if (sigs) {
+ new_rrset->addRRsig(sigs);
+ }
+ s_RRset* py_rrset =
+ static_cast<s_RRset*>(rrset_type.tp_alloc(&rrset_type, 0));
+ if (py_rrset == NULL) {
+ isc_throw(PyCPPWrapperException, "Unexpected NULL C++ object, "
+ "probably due to short memory");
}
- Py_INCREF(&rrset_type);
- PyModule_AddObject(mod, "RRset",
- reinterpret_cast<PyObject*>(&rrset_type));
-
- return (true);
+ py_rrset->cppobj = new_rrset;
+ return (py_rrset);
}
+bool
+PyRRset_Check(PyObject* obj) {
+ if (obj == NULL) {
+ isc_throw(PyCPPWrapperException, "obj argument NULL in typecheck");
+ }
+ return (PyObject_TypeCheck(obj, &rrset_type));
+}
+
+RRset&
+PyRRset_ToRRset(PyObject* rrset_obj) {
+ s_RRset* rrset = static_cast<s_RRset*>(rrset_obj);
+ return (*rrset->cppobj);
+}
+
+RRsetPtr
+PyRRset_ToRRsetPtr(PyObject* rrset_obj) {
+ if (rrset_obj == NULL) {
+ isc_throw(PyCPPWrapperException,
+ "obj argument NULL in RRset PyObject conversion");
+ }
+ s_RRset* rrset = static_cast<s_RRset*>(rrset_obj);
+ return (rrset->cppobj);
+}
+
+
+} // end python namespace
+} // end dns namespace
+} // end isc namespace
diff --git a/src/lib/dns/python/rrset_python.h b/src/lib/dns/python/rrset_python.h
new file mode 100644
index 0000000..4268678
--- /dev/null
+++ b/src/lib/dns/python/rrset_python.h
@@ -0,0 +1,78 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __PYTHON_RRSET_H
+#define __PYTHON_RRSET_H 1
+
+#include <Python.h>
+
+#include <dns/rrset.h>
+
+#include <util/python/pycppwrapper_util.h>
+
+namespace isc {
+namespace dns {
+namespace python {
+
+extern PyObject* po_EmptyRRset;
+
+extern PyTypeObject rrset_type;
+
+/// This is a simple shortcut to create a python RRset object (in the
+/// form of a pointer to PyObject) with minimal exception safety.
+/// On success, it returns a valid pointer to PyObject with a reference
+/// counter of 1; if something goes wrong it throws an exception (it never
+/// returns a NULL pointer).
+/// This function is expected to be called within a try block
+/// followed by necessary setup for python exception.
+PyObject* createRRsetObject(const RRset& source);
+
+/// \brief Checks if the given python object is a RRset object
+///
+/// \exception PyCPPWrapperException if obj is NULL
+///
+/// \param obj The object to check the type of
+/// \return true if the object is of type RRset, false otherwise
+bool PyRRset_Check(PyObject* obj);
+
+/// \brief Returns a reference to the RRset object contained within the given
+/// Python object.
+///
+/// \note The given object MUST be of type RRset; this can be checked with
+/// either the right call to ParseTuple("O!"), or with PyRRset_Check()
+///
+/// \note This is not a copy; if the RRset is needed when the PyObject
+/// may be destroyed, the caller must copy it itself.
+///
+/// \param rrset_obj The rrset object to convert
+RRset& PyRRset_ToRRset(PyObject* rrset_obj);
+
+/// \brief Returns the shared_ptr of the RRset object contained within the
+/// given Python object.
+///
+/// \note The given object MUST be of type RRset; this can be checked with
+/// either the right call to ParseTuple("O!"), or with PyRRset_Check()
+///
+/// \param rrset_obj The rrset object to convert
+RRsetPtr PyRRset_ToRRsetPtr(PyObject* rrset_obj);
+
+
+} // namespace python
+} // namespace dns
+} // namespace isc
+#endif // __PYTHON_RRSET_H
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/dns/python/rrttl_python.cc b/src/lib/dns/python/rrttl_python.cc
index c4b25bf..3a3f067 100644
--- a/src/lib/dns/python/rrttl_python.cc
+++ b/src/lib/dns/python/rrttl_python.cc
@@ -12,57 +12,41 @@
// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
// PERFORMANCE OF THIS SOFTWARE.
+#include <Python.h>
#include <vector>
#include <dns/rrttl.h>
+#include <dns/messagerenderer.h>
+#include <util/buffer.h>
+#include <util/python/pycppwrapper_util.h>
+
+#include "rrttl_python.h"
+#include "pydnspp_common.h"
+#include "messagerenderer_python.h"
using namespace std;
using namespace isc::dns;
+using namespace isc::dns::python;
using namespace isc::util;
+using namespace isc::util::python;
-//
-// Declaration of the custom exceptions
-// Initialization and addition of these go in the initModulePart
-// function at the end of this file
-//
-static PyObject* po_InvalidRRTTL;
-static PyObject* po_IncompleteRRTTL;
-
-//
-// Definition of the classes
-//
-
-// For each class, we need a struct, a helper functions (init, destroy,
-// and static wrappers around the methods we export), a list of methods,
-// and a type description
-
-//
-// RRTTL
-//
-
+namespace {
// The s_* Class simply covers one instantiation of the object
class s_RRTTL : public PyObject {
public:
- RRTTL* rrttl;
+ s_RRTTL() : cppobj(NULL) {};
+ isc::dns::RRTTL* cppobj;
};
-//
-// We declare the functions here, the definitions are below
-// the type definition of the object, since both can use the other
-//
-
-// General creation and destruction
-static int RRTTL_init(s_RRTTL* self, PyObject* args);
-static void RRTTL_destroy(s_RRTTL* self);
+typedef CPPPyObjectContainer<s_RRTTL, RRTTL> RRTTLContainer;
-// These are the functions we export
-static PyObject* RRTTL_toText(s_RRTTL* self);
+PyObject* RRTTL_toText(s_RRTTL* self);
// This is a second version of toText, we need one where the argument
// is a PyObject*, for the str() function in python.
-static PyObject* RRTTL_str(PyObject* self);
-static PyObject* RRTTL_toWire(s_RRTTL* self, PyObject* args);
-static PyObject* RRTTL_getValue(s_RRTTL* self);
-static PyObject* RRTTL_richcmp(s_RRTTL* self, s_RRTTL* other, int op);
+PyObject* RRTTL_str(PyObject* self);
+PyObject* RRTTL_toWire(s_RRTTL* self, PyObject* args);
+PyObject* RRTTL_getValue(s_RRTTL* self);
+PyObject* RRTTL_richcmp(s_RRTTL* self, s_RRTTL* other, int op);
// This list contains the actual set of functions we have in
// python. Each entry has
@@ -70,7 +54,7 @@ static PyObject* RRTTL_richcmp(s_RRTTL* self, s_RRTTL* other, int op);
// 2. Our static function here
// 3. Argument type
// 4. Documentation
-static PyMethodDef RRTTL_methods[] = {
+PyMethodDef RRTTL_methods[] = {
{ "to_text", reinterpret_cast<PyCFunction>(RRTTL_toText), METH_NOARGS,
"Returns the string representation" },
{ "to_wire", reinterpret_cast<PyCFunction>(RRTTL_toWire), METH_VARARGS,
@@ -85,65 +69,7 @@ static PyMethodDef RRTTL_methods[] = {
{ NULL, NULL, 0, NULL }
};
-// This defines the complete type for reflection in python and
-// parsing of PyObject* to s_RRTTL
-// Most of the functions are not actually implemented and NULL here.
-static PyTypeObject rrttl_type = {
- PyVarObject_HEAD_INIT(NULL, 0)
- "pydnspp.RRTTL",
- sizeof(s_RRTTL), // tp_basicsize
- 0, // tp_itemsize
- (destructor)RRTTL_destroy, // tp_dealloc
- NULL, // tp_print
- NULL, // tp_getattr
- NULL, // tp_setattr
- NULL, // tp_reserved
- NULL, // tp_repr
- NULL, // tp_as_number
- NULL, // tp_as_sequence
- NULL, // tp_as_mapping
- NULL, // tp_hash
- NULL, // tp_call
- RRTTL_str, // tp_str
- NULL, // tp_getattro
- NULL, // tp_setattro
- NULL, // tp_as_buffer
- Py_TPFLAGS_DEFAULT, // tp_flags
- "The RRTTL class encapsulates TTLs used in DNS resource records.\n\n"
- "This is a straightforward class; an RRTTL object simply maintains a "
- "32-bit unsigned integer corresponding to the TTL value. The main purpose "
- "of this class is to provide convenient interfaces to convert a textual "
- "representation into the integer TTL value and vice versa, and to handle "
- "wire-format representations.",
- NULL, // tp_traverse
- NULL, // tp_clear
- (richcmpfunc)RRTTL_richcmp, // tp_richcompare
- 0, // tp_weaklistoffset
- NULL, // tp_iter
- NULL, // tp_iternext
- RRTTL_methods, // tp_methods
- NULL, // tp_members
- NULL, // tp_getset
- NULL, // tp_base
- NULL, // tp_dict
- NULL, // tp_descr_get
- NULL, // tp_descr_set
- 0, // tp_dictoffset
- (initproc)RRTTL_init, // tp_init
- NULL, // tp_alloc
- PyType_GenericNew, // tp_new
- NULL, // tp_free
- NULL, // tp_is_gc
- NULL, // tp_bases
- NULL, // tp_mro
- NULL, // tp_cache
- NULL, // tp_subclasses
- NULL, // tp_weaklist
- NULL, // tp_del
- 0 // tp_version_tag
-};
-
-static int
+int
RRTTL_init(s_RRTTL* self, PyObject* args) {
const char* s;
long long i;
@@ -157,7 +83,7 @@ RRTTL_init(s_RRTTL* self, PyObject* args) {
// (the way to do exceptions is to set PyErr and return -1)
try {
if (PyArg_ParseTuple(args, "s", &s)) {
- self->rrttl = new RRTTL(s);
+ self->cppobj = new RRTTL(s);
return (0);
} else if (PyArg_ParseTuple(args, "L", &i)) {
PyErr_Clear();
@@ -165,7 +91,7 @@ RRTTL_init(s_RRTTL* self, PyObject* args) {
PyErr_SetString(PyExc_ValueError, "RR TTL number out of range");
return (-1);
}
- self->rrttl = new RRTTL(i);
+ self->cppobj = new RRTTL(i);
return (0);
} else if (PyArg_ParseTuple(args, "O", &bytes) &&
PySequence_Check(bytes)) {
@@ -176,7 +102,7 @@ RRTTL_init(s_RRTTL* self, PyObject* args) {
return (result);
}
InputBuffer ib(&data[0], size);
- self->rrttl = new RRTTL(ib);
+ self->cppobj = new RRTTL(ib);
PyErr_Clear();
return (0);
}
@@ -200,20 +126,20 @@ RRTTL_init(s_RRTTL* self, PyObject* args) {
return (-1);
}
-static void
+void
RRTTL_destroy(s_RRTTL* self) {
- delete self->rrttl;
- self->rrttl = NULL;
+ delete self->cppobj;
+ self->cppobj = NULL;
Py_TYPE(self)->tp_free(self);
}
-static PyObject*
+PyObject*
RRTTL_toText(s_RRTTL* self) {
// Py_BuildValue makes python objects from native data
- return (Py_BuildValue("s", self->rrttl->toText().c_str()));
+ return (Py_BuildValue("s", self->cppobj->toText().c_str()));
}
-static PyObject*
+PyObject*
RRTTL_str(PyObject* self) {
// Simply call the to_text method we already defined
return (PyObject_CallMethod(self,
@@ -221,16 +147,16 @@ RRTTL_str(PyObject* self) {
const_cast<char*>("")));
}
-static PyObject*
+PyObject*
RRTTL_toWire(s_RRTTL* self, PyObject* args) {
PyObject* bytes;
- s_MessageRenderer* mr;
-
+ PyObject* mr;
+
if (PyArg_ParseTuple(args, "O", &bytes) && PySequence_Check(bytes)) {
PyObject* bytes_o = bytes;
-
+
OutputBuffer buffer(4);
- self->rrttl->toWire(buffer);
+ self->cppobj->toWire(buffer);
PyObject* n = PyBytes_FromStringAndSize(static_cast<const char*>(buffer.getData()),
buffer.getLength());
PyObject* result = PySequence_InPlaceConcat(bytes_o, n);
@@ -239,7 +165,7 @@ RRTTL_toWire(s_RRTTL* self, PyObject* args) {
Py_DECREF(n);
return (result);
} else if (PyArg_ParseTuple(args, "O!", &messagerenderer_type, &mr)) {
- self->rrttl->toWire(*mr->messagerenderer);
+ self->cppobj->toWire(PyMessageRenderer_ToMessageRenderer(mr));
// If we return NULL it is seen as an error, so use this for
// None returns
Py_RETURN_NONE;
@@ -250,12 +176,12 @@ RRTTL_toWire(s_RRTTL* self, PyObject* args) {
return (NULL);
}
-static PyObject*
+PyObject*
RRTTL_getValue(s_RRTTL* self) {
- return (Py_BuildValue("I", self->rrttl->getValue()));
+ return (Py_BuildValue("I", self->cppobj->getValue()));
}
-static PyObject*
+PyObject*
RRTTL_richcmp(s_RRTTL* self, s_RRTTL* other, int op) {
bool c = false;
@@ -267,24 +193,24 @@ RRTTL_richcmp(s_RRTTL* self, s_RRTTL* other, int op) {
switch (op) {
case Py_LT:
- c = *self->rrttl < *other->rrttl;
+ c = *self->cppobj < *other->cppobj;
break;
case Py_LE:
- c = *self->rrttl < *other->rrttl ||
- *self->rrttl == *other->rrttl;
+ c = *self->cppobj < *other->cppobj ||
+ *self->cppobj == *other->cppobj;
break;
case Py_EQ:
- c = *self->rrttl == *other->rrttl;
+ c = *self->cppobj == *other->cppobj;
break;
case Py_NE:
- c = *self->rrttl != *other->rrttl;
+ c = *self->cppobj != *other->cppobj;
break;
case Py_GT:
- c = *other->rrttl < *self->rrttl;
+ c = *other->cppobj < *self->cppobj;
break;
case Py_GE:
- c = *other->rrttl < *self->rrttl ||
- *self->rrttl == *other->rrttl;
+ c = *other->cppobj < *self->cppobj ||
+ *self->cppobj == *other->cppobj;
break;
}
if (c)
@@ -292,27 +218,104 @@ RRTTL_richcmp(s_RRTTL* self, s_RRTTL* other, int op) {
else
Py_RETURN_FALSE;
}
-// end of RRTTL
+} // end anonymous namespace
+
+namespace isc {
+namespace dns {
+namespace python {
+
+//
+// Declaration of the custom exceptions
+// Initialization and addition of these go in the initModulePart
+// function in pydnspp.cc
+//
+PyObject* po_InvalidRRTTL;
+PyObject* po_IncompleteRRTTL;
+
+// This defines the complete type for reflection in python and
+// parsing of PyObject* to s_RRTTL
+// Most of the functions are not actually implemented and NULL here.
+PyTypeObject rrttl_type = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ "pydnspp.RRTTL",
+ sizeof(s_RRTTL), // tp_basicsize
+ 0, // tp_itemsize
+ (destructor)RRTTL_destroy, // tp_dealloc
+ NULL, // tp_print
+ NULL, // tp_getattr
+ NULL, // tp_setattr
+ NULL, // tp_reserved
+ NULL, // tp_repr
+ NULL, // tp_as_number
+ NULL, // tp_as_sequence
+ NULL, // tp_as_mapping
+ NULL, // tp_hash
+ NULL, // tp_call
+ RRTTL_str, // tp_str
+ NULL, // tp_getattro
+ NULL, // tp_setattro
+ NULL, // tp_as_buffer
+ Py_TPFLAGS_DEFAULT, // tp_flags
+ "The RRTTL class encapsulates TTLs used in DNS resource records.\n\n"
+ "This is a straightforward class; an RRTTL object simply maintains a "
+ "32-bit unsigned integer corresponding to the TTL value. The main purpose "
+ "of this class is to provide convenient interfaces to convert a textual "
+ "representation into the integer TTL value and vice versa, and to handle "
+ "wire-format representations.",
+ NULL, // tp_traverse
+ NULL, // tp_clear
+ (richcmpfunc)RRTTL_richcmp, // tp_richcompare
+ 0, // tp_weaklistoffset
+ NULL, // tp_iter
+ NULL, // tp_iternext
+ RRTTL_methods, // tp_methods
+ NULL, // tp_members
+ NULL, // tp_getset
+ NULL, // tp_base
+ NULL, // tp_dict
+ NULL, // tp_descr_get
+ NULL, // tp_descr_set
+ 0, // tp_dictoffset
+ (initproc)RRTTL_init, // tp_init
+ NULL, // tp_alloc
+ PyType_GenericNew, // tp_new
+ NULL, // tp_free
+ NULL, // tp_is_gc
+ NULL, // tp_bases
+ NULL, // tp_mro
+ NULL, // tp_cache
+ NULL, // tp_subclasses
+ NULL, // tp_weaklist
+ NULL, // tp_del
+ 0 // tp_version_tag
+};
+
+PyObject*
+createRRTTLObject(const RRTTL& source) {
+ RRTTLContainer container(PyObject_New(s_RRTTL, &rrttl_type));
+ container.set(new RRTTL(source));
+ return (container.release());
+}
-// Module Initialization, all statics are initialized here
bool
-initModulePart_RRTTL(PyObject* mod) {
- // Add the exceptions to the module
- po_InvalidRRTTL = PyErr_NewException("pydnspp.InvalidRRTTL", NULL, NULL);
- PyModule_AddObject(mod, "InvalidRRTTL", po_InvalidRRTTL);
- po_IncompleteRRTTL = PyErr_NewException("pydnspp.IncompleteRRTTL", NULL, NULL);
- PyModule_AddObject(mod, "IncompleteRRTTL", po_IncompleteRRTTL);
+PyRRTTL_Check(PyObject* obj) {
+ if (obj == NULL) {
+ isc_throw(PyCPPWrapperException, "obj argument NULL in typecheck");
+ }
+ return (PyObject_TypeCheck(obj, &rrttl_type));
+}
- // We initialize the static description object with PyType_Ready(),
- // then add it to the module. This is not just a check! (leaving
- // this out results in segmentation faults)
- if (PyType_Ready(&rrttl_type) < 0) {
- return (false);
+const RRTTL&
+PyRRTTL_ToRRTTL(const PyObject* rrttl_obj) {
+ if (rrttl_obj == NULL) {
+ isc_throw(PyCPPWrapperException,
+ "obj argument NULL in RRTTL PyObject conversion");
}
- Py_INCREF(&rrttl_type);
- PyModule_AddObject(mod, "RRTTL",
- reinterpret_cast<PyObject*>(&rrttl_type));
-
- return (true);
+ const s_RRTTL* rrttl = static_cast<const s_RRTTL*>(rrttl_obj);
+ return (*rrttl->cppobj);
}
+
+} // namespace python
+} // namespace dns
+} // namespace isc
diff --git a/src/lib/dns/python/rrttl_python.h b/src/lib/dns/python/rrttl_python.h
new file mode 100644
index 0000000..9dbc982
--- /dev/null
+++ b/src/lib/dns/python/rrttl_python.h
@@ -0,0 +1,67 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __PYTHON_RRTTL_H
+#define __PYTHON_RRTTL_H 1
+
+#include <Python.h>
+
+namespace isc {
+namespace dns {
+class RRTTL;
+
+namespace python {
+
+extern PyObject* po_InvalidRRTTL;
+extern PyObject* po_IncompleteRRTTL;
+
+extern PyTypeObject rrttl_type;
+
+/// This is a simple shortcut to create a python RRTTL object (in the
+/// form of a pointer to PyObject) with minimal exception safety.
+/// On success, it returns a valid pointer to PyObject with a reference
+/// counter of 1; if something goes wrong it throws an exception (it never
+/// returns a NULL pointer).
+/// This function is expected to be called within a try block
+/// followed by necessary setup for python exception.
+PyObject* createRRTTLObject(const RRTTL& source);
+
+/// \brief Checks if the given python object is a RRTTL object
+///
+/// \exception PyCPPWrapperException if obj is NULL
+///
+/// \param obj The object to check the type of
+/// \return true if the object is of type RRTTL, false otherwise
+bool PyRRTTL_Check(PyObject* obj);
+
+/// \brief Returns a reference to the RRTTL object contained within the given
+/// Python object.
+///
+/// \note The given object MUST be of type RRTTL; this can be checked with
+/// either the right call to ParseTuple("O!"), or with PyRRTTL_Check()
+///
+/// \note This is not a copy; if the RRTTL is needed when the PyObject
+/// may be destroyed, the caller must copy it itself.
+///
+/// \param rrttl_obj The rrttl object to convert
+const RRTTL& PyRRTTL_ToRRTTL(const PyObject* rrttl_obj);
+
+} // namespace python
+} // namespace dns
+} // namespace isc
+#endif // __PYTHON_RRTTL_H
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/dns/python/rrtype_python.cc b/src/lib/dns/python/rrtype_python.cc
index 00e0acd..bf20b7c 100644
--- a/src/lib/dns/python/rrtype_python.cc
+++ b/src/lib/dns/python/rrtype_python.cc
@@ -12,77 +12,64 @@
// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
// PERFORMANCE OF THIS SOFTWARE.
+#include <Python.h>
#include <vector>
#include <dns/rrtype.h>
+#include <dns/messagerenderer.h>
+#include <util/python/pycppwrapper_util.h>
+
+#include "rrtype_python.h"
+#include "messagerenderer_python.h"
+#include "pydnspp_common.h"
using namespace std;
using namespace isc::dns;
+using namespace isc::dns::python;
using namespace isc::util;
+using namespace isc::util::python;
-//
-// Declaration of the custom exceptions
-// Initialization and addition of these go in the initModulePart
-// function at the end of this file
-//
-static PyObject* po_InvalidRRType;
-static PyObject* po_IncompleteRRType;
-
-//
-// Definition of the classes
-//
-
-// For each class, we need a struct, a helper functions (init, destroy,
-// and static wrappers around the methods we export), a list of methods,
-// and a type description
-
-//
-// RRType
-//
-
+namespace {
// The s_* Class simply covers one instantiation of the object
class s_RRType : public PyObject {
public:
- const RRType* rrtype;
+ const RRType* cppobj;
};
-//
-// We declare the functions here, the definitions are below
-// the type definition of the object, since both can use the other
-//
-
// General creation and destruction
-static int RRType_init(s_RRType* self, PyObject* args);
-static void RRType_destroy(s_RRType* self);
+int RRType_init(s_RRType* self, PyObject* args);
+void RRType_destroy(s_RRType* self);
// These are the functions we export
-static PyObject*
+PyObject*
RRType_toText(s_RRType* self);
// This is a second version of toText, we need one where the argument
// is a PyObject*, for the str() function in python.
-static PyObject* RRType_str(PyObject* self);
-static PyObject* RRType_toWire(s_RRType* self, PyObject* args);
-static PyObject* RRType_getCode(s_RRType* self);
-static PyObject* RRType_richcmp(s_RRType* self, s_RRType* other, int op);
-static PyObject* RRType_NSEC3PARAM(s_RRType *self);
-static PyObject* RRType_DNAME(s_RRType *self);
-static PyObject* RRType_PTR(s_RRType *self);
-static PyObject* RRType_MX(s_RRType *self);
-static PyObject* RRType_DNSKEY(s_RRType *self);
-static PyObject* RRType_TXT(s_RRType *self);
-static PyObject* RRType_RRSIG(s_RRType *self);
-static PyObject* RRType_NSEC(s_RRType *self);
-static PyObject* RRType_AAAA(s_RRType *self);
-static PyObject* RRType_DS(s_RRType *self);
-static PyObject* RRType_OPT(s_RRType *self);
-static PyObject* RRType_A(s_RRType *self);
-static PyObject* RRType_NS(s_RRType *self);
-static PyObject* RRType_CNAME(s_RRType *self);
-static PyObject* RRType_SOA(s_RRType *self);
-static PyObject* RRType_NSEC3(s_RRType *self);
-static PyObject* RRType_IXFR(s_RRType *self);
-static PyObject* RRType_AXFR(s_RRType *self);
-static PyObject* RRType_ANY(s_RRType *self);
+PyObject* RRType_str(PyObject* self);
+PyObject* RRType_toWire(s_RRType* self, PyObject* args);
+PyObject* RRType_getCode(s_RRType* self);
+PyObject* RRType_richcmp(s_RRType* self, s_RRType* other, int op);
+PyObject* RRType_NSEC3PARAM(s_RRType *self);
+PyObject* RRType_DNAME(s_RRType *self);
+PyObject* RRType_PTR(s_RRType *self);
+PyObject* RRType_MX(s_RRType *self);
+PyObject* RRType_DNSKEY(s_RRType *self);
+PyObject* RRType_TXT(s_RRType *self);
+PyObject* RRType_RRSIG(s_RRType *self);
+PyObject* RRType_NSEC(s_RRType *self);
+PyObject* RRType_AAAA(s_RRType *self);
+PyObject* RRType_DS(s_RRType *self);
+PyObject* RRType_OPT(s_RRType *self);
+PyObject* RRType_A(s_RRType *self);
+PyObject* RRType_NS(s_RRType *self);
+PyObject* RRType_CNAME(s_RRType *self);
+PyObject* RRType_SOA(s_RRType *self);
+PyObject* RRType_NSEC3(s_RRType *self);
+PyObject* RRType_IXFR(s_RRType *self);
+PyObject* RRType_AXFR(s_RRType *self);
+PyObject* RRType_ANY(s_RRType *self);
+
+typedef CPPPyObjectContainer<s_RRType, RRType> RRTypeContainer;
// This list contains the actual set of functions we have in
// python. Each entry has
@@ -90,7 +77,7 @@ static PyObject* RRType_ANY(s_RRType *self);
// 2. Our static function here
// 3. Argument type
// 4. Documentation
-static PyMethodDef RRType_methods[] = {
+PyMethodDef RRType_methods[] = {
{ "to_text", reinterpret_cast<PyCFunction>(RRType_toText), METH_NOARGS,
"Returns the string representation" },
{ "to_wire", reinterpret_cast<PyCFunction>(RRType_toWire), METH_VARARGS,
@@ -124,63 +111,7 @@ static PyMethodDef RRType_methods[] = {
{ NULL, NULL, 0, NULL }
};
-// This defines the complete type for reflection in python and
-// parsing of PyObject* to s_RRType
-// Most of the functions are not actually implemented and NULL here.
-static PyTypeObject rrtype_type = {
- PyVarObject_HEAD_INIT(NULL, 0)
- "pydnspp.RRType",
- sizeof(s_RRType), // tp_basicsize
- 0, // tp_itemsize
- (destructor)RRType_destroy, // tp_dealloc
- NULL, // tp_print
- NULL, // tp_getattr
- NULL, // tp_setattr
- NULL, // tp_reserved
- NULL, // tp_repr
- NULL, // tp_as_number
- NULL, // tp_as_sequence
- NULL, // tp_as_mapping
- NULL, // tp_hash
- NULL, // tp_call
- RRType_str, // tp_str
- NULL, // tp_getattro
- NULL, // tp_setattro
- NULL, // tp_as_buffer
- Py_TPFLAGS_DEFAULT, // tp_flags
- "The RRType class encapsulates DNS resource record types.\n\n"
- "This class manages the 16-bit integer type codes in quite a straightforward "
- "way. The only non trivial task is to handle textual representations of "
- "RR types, such as \"A\", \"AAAA\", or \"TYPE65534\".",
- NULL, // tp_traverse
- NULL, // tp_clear
- (richcmpfunc)RRType_richcmp, // tp_richcompare
- 0, // tp_weaklistoffset
- NULL, // tp_iter
- NULL, // tp_iternext
- RRType_methods, // tp_methods
- NULL, // tp_members
- NULL, // tp_getset
- NULL, // tp_base
- NULL, // tp_dict
- NULL, // tp_descr_get
- NULL, // tp_descr_set
- 0, // tp_dictoffset
- (initproc)RRType_init, // tp_init
- NULL, // tp_alloc
- PyType_GenericNew, // tp_new
- NULL, // tp_free
- NULL, // tp_is_gc
- NULL, // tp_bases
- NULL, // tp_mro
- NULL, // tp_cache
- NULL, // tp_subclasses
- NULL, // tp_weaklist
- NULL, // tp_del
- 0 // tp_version_tag
-};
-
-static int
+int
RRType_init(s_RRType* self, PyObject* args) {
const char* s;
long i;
@@ -194,7 +125,7 @@ RRType_init(s_RRType* self, PyObject* args) {
// (the way to do exceptions is to set PyErr and return -1)
try {
if (PyArg_ParseTuple(args, "s", &s)) {
- self->rrtype = new RRType(s);
+ self->cppobj = new RRType(s);
return (0);
} else if (PyArg_ParseTuple(args, "l", &i)) {
PyErr_Clear();
@@ -202,7 +133,7 @@ RRType_init(s_RRType* self, PyObject* args) {
PyErr_SetString(PyExc_ValueError, "RR Type number out of range");
return (-1);
}
- self->rrtype = new RRType(i);
+ self->cppobj = new RRType(i);
return (0);
} else if (PyArg_ParseTuple(args, "O", &bytes) && PySequence_Check(bytes)) {
Py_ssize_t size = PySequence_Size(bytes);
@@ -212,7 +143,7 @@ RRType_init(s_RRType* self, PyObject* args) {
return (result);
}
InputBuffer ib(&data[0], size);
- self->rrtype = new RRType(ib);
+ self->cppobj = new RRType(ib);
PyErr_Clear();
return (0);
}
@@ -236,36 +167,36 @@ RRType_init(s_RRType* self, PyObject* args) {
return (-1);
}
-static void
+void
RRType_destroy(s_RRType* self) {
- delete self->rrtype;
- self->rrtype = NULL;
+ delete self->cppobj;
+ self->cppobj = NULL;
Py_TYPE(self)->tp_free(self);
}
-static PyObject*
+PyObject*
RRType_toText(s_RRType* self) {
// Py_BuildValue makes python objects from native data
- return (Py_BuildValue("s", self->rrtype->toText().c_str()));
+ return (Py_BuildValue("s", self->cppobj->toText().c_str()));
}
-static PyObject*
+PyObject*
RRType_str(PyObject* self) {
// Simply call the to_text method we already defined
return (PyObject_CallMethod(self, const_cast<char*>("to_text"),
const_cast<char*>("")));
}
-static PyObject*
+PyObject*
RRType_toWire(s_RRType* self, PyObject* args) {
PyObject* bytes;
- s_MessageRenderer* mr;
+ PyObject* mr;
if (PyArg_ParseTuple(args, "O", &bytes) && PySequence_Check(bytes)) {
PyObject* bytes_o = bytes;
OutputBuffer buffer(2);
- self->rrtype->toWire(buffer);
+ self->cppobj->toWire(buffer);
PyObject* n = PyBytes_FromStringAndSize(static_cast<const char*>(buffer.getData()), buffer.getLength());
PyObject* result = PySequence_InPlaceConcat(bytes_o, n);
// We need to release the object we temporarily created here
@@ -273,7 +204,7 @@ RRType_toWire(s_RRType* self, PyObject* args) {
Py_DECREF(n);
return (result);
} else if (PyArg_ParseTuple(args, "O!", &messagerenderer_type, &mr)) {
- self->rrtype->toWire(*mr->messagerenderer);
+ self->cppobj->toWire(PyMessageRenderer_ToMessageRenderer(mr));
// If we return NULL it is seen as an error, so use this for
// None returns
Py_RETURN_NONE;
@@ -284,12 +215,12 @@ RRType_toWire(s_RRType* self, PyObject* args) {
return (NULL);
}
-static PyObject*
+PyObject*
RRType_getCode(s_RRType* self) {
- return (Py_BuildValue("I", self->rrtype->getCode()));
+ return (Py_BuildValue("I", self->cppobj->getCode()));
}
-static PyObject*
+PyObject*
RRType_richcmp(s_RRType* self, s_RRType* other, int op) {
bool c;
@@ -301,24 +232,24 @@ RRType_richcmp(s_RRType* self, s_RRType* other, int op) {
switch (op) {
case Py_LT:
- c = *self->rrtype < *other->rrtype;
+ c = *self->cppobj < *other->cppobj;
break;
case Py_LE:
- c = *self->rrtype < *other->rrtype ||
- *self->rrtype == *other->rrtype;
+ c = *self->cppobj < *other->cppobj ||
+ *self->cppobj == *other->cppobj;
break;
case Py_EQ:
- c = *self->rrtype == *other->rrtype;
+ c = *self->cppobj == *other->cppobj;
break;
case Py_NE:
- c = *self->rrtype != *other->rrtype;
+ c = *self->cppobj != *other->cppobj;
break;
case Py_GT:
- c = *other->rrtype < *self->rrtype;
+ c = *other->cppobj < *self->cppobj;
break;
case Py_GE:
- c = *other->rrtype < *self->rrtype ||
- *self->rrtype == *other->rrtype;
+ c = *other->cppobj < *self->cppobj ||
+ *self->cppobj == *other->cppobj;
break;
default:
PyErr_SetString(PyExc_IndexError,
@@ -334,131 +265,200 @@ RRType_richcmp(s_RRType* self, s_RRType* other, int op) {
//
// Common function for RRType_A/NS/etc.
//
-static PyObject* RRType_createStatic(RRType stc) {
+PyObject* RRType_createStatic(RRType stc) {
s_RRType* ret = PyObject_New(s_RRType, &rrtype_type);
if (ret != NULL) {
- ret->rrtype = new RRType(stc);
+ ret->cppobj = new RRType(stc);
}
return (ret);
}
-static PyObject*
+PyObject*
RRType_NSEC3PARAM(s_RRType*) {
return (RRType_createStatic(RRType::NSEC3PARAM()));
}
-static PyObject*
+PyObject*
RRType_DNAME(s_RRType*) {
return (RRType_createStatic(RRType::DNAME()));
}
-static PyObject*
+PyObject*
RRType_PTR(s_RRType*) {
return (RRType_createStatic(RRType::PTR()));
}
-static PyObject*
+PyObject*
RRType_MX(s_RRType*) {
return (RRType_createStatic(RRType::MX()));
}
-static PyObject*
+PyObject*
RRType_DNSKEY(s_RRType*) {
return (RRType_createStatic(RRType::DNSKEY()));
}
-static PyObject*
+PyObject*
RRType_TXT(s_RRType*) {
return (RRType_createStatic(RRType::TXT()));
}
-static PyObject*
+PyObject*
RRType_RRSIG(s_RRType*) {
return (RRType_createStatic(RRType::RRSIG()));
}
-static PyObject*
+PyObject*
RRType_NSEC(s_RRType*) {
return (RRType_createStatic(RRType::NSEC()));
}
-static PyObject*
+PyObject*
RRType_AAAA(s_RRType*) {
return (RRType_createStatic(RRType::AAAA()));
}
-static PyObject*
+PyObject*
RRType_DS(s_RRType*) {
return (RRType_createStatic(RRType::DS()));
}
-static PyObject*
+PyObject*
RRType_OPT(s_RRType*) {
return (RRType_createStatic(RRType::OPT()));
}
-static PyObject*
+PyObject*
RRType_A(s_RRType*) {
return (RRType_createStatic(RRType::A()));
}
-static PyObject*
+PyObject*
RRType_NS(s_RRType*) {
return (RRType_createStatic(RRType::NS()));
}
-static PyObject*
+PyObject*
RRType_CNAME(s_RRType*) {
return (RRType_createStatic(RRType::CNAME()));
}
-static PyObject*
+PyObject*
RRType_SOA(s_RRType*) {
return (RRType_createStatic(RRType::SOA()));
}
-static PyObject*
+PyObject*
RRType_NSEC3(s_RRType*) {
return (RRType_createStatic(RRType::NSEC3()));
}
-static PyObject*
+PyObject*
RRType_IXFR(s_RRType*) {
return (RRType_createStatic(RRType::IXFR()));
}
-static PyObject*
+PyObject*
RRType_AXFR(s_RRType*) {
return (RRType_createStatic(RRType::AXFR()));
}
-static PyObject*
+PyObject*
RRType_ANY(s_RRType*) {
return (RRType_createStatic(RRType::ANY()));
}
+} // end anonymous namespace
+
+namespace isc {
+namespace dns {
+namespace python {
-// end of RRType
+PyObject* po_InvalidRRType;
+PyObject* po_IncompleteRRType;
+
+// This defines the complete type for reflection in python and
+// parsing of PyObject* to s_RRType
+// Most of the functions are not actually implemented and NULL here.
+PyTypeObject rrtype_type = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ "pydnspp.RRType",
+ sizeof(s_RRType), // tp_basicsize
+ 0, // tp_itemsize
+ (destructor)RRType_destroy, // tp_dealloc
+ NULL, // tp_print
+ NULL, // tp_getattr
+ NULL, // tp_setattr
+ NULL, // tp_reserved
+ NULL, // tp_repr
+ NULL, // tp_as_number
+ NULL, // tp_as_sequence
+ NULL, // tp_as_mapping
+ NULL, // tp_hash
+ NULL, // tp_call
+ RRType_str, // tp_str
+ NULL, // tp_getattro
+ NULL, // tp_setattro
+ NULL, // tp_as_buffer
+ Py_TPFLAGS_DEFAULT, // tp_flags
+ "The RRType class encapsulates DNS resource record types.\n\n"
+ "This class manages the 16-bit integer type codes in quite a straightforward "
+ "way. The only non trivial task is to handle textual representations of "
+ "RR types, such as \"A\", \"AAAA\", or \"TYPE65534\".",
+ NULL, // tp_traverse
+ NULL, // tp_clear
+ (richcmpfunc)RRType_richcmp, // tp_richcompare
+ 0, // tp_weaklistoffset
+ NULL, // tp_iter
+ NULL, // tp_iternext
+ RRType_methods, // tp_methods
+ NULL, // tp_members
+ NULL, // tp_getset
+ NULL, // tp_base
+ NULL, // tp_dict
+ NULL, // tp_descr_get
+ NULL, // tp_descr_set
+ 0, // tp_dictoffset
+ (initproc)RRType_init, // tp_init
+ NULL, // tp_alloc
+ PyType_GenericNew, // tp_new
+ NULL, // tp_free
+ NULL, // tp_is_gc
+ NULL, // tp_bases
+ NULL, // tp_mro
+ NULL, // tp_cache
+ NULL, // tp_subclasses
+ NULL, // tp_weaklist
+ NULL, // tp_del
+ 0 // tp_version_tag
+};
+PyObject*
+createRRTypeObject(const RRType& source) {
+ RRTypeContainer container(PyObject_New(s_RRType, &rrtype_type));
+ container.set(new RRType(source));
+ return (container.release());
+}
-// Module Initialization, all statics are initialized here
bool
-initModulePart_RRType(PyObject* mod) {
- // Add the exceptions to the module
- po_InvalidRRType = PyErr_NewException("pydnspp.InvalidRRType", NULL, NULL);
- PyModule_AddObject(mod, "InvalidRRType", po_InvalidRRType);
- po_IncompleteRRType = PyErr_NewException("pydnspp.IncompleteRRType", NULL, NULL);
- PyModule_AddObject(mod, "IncompleteRRType", po_IncompleteRRType);
-
- // We initialize the static description object with PyType_Ready(),
- // then add it to the module. This is not just a check! (leaving
- // this out results in segmentation faults)
- if (PyType_Ready(&rrtype_type) < 0) {
- return (false);
+PyRRType_Check(PyObject* obj) {
+ if (obj == NULL) {
+ isc_throw(PyCPPWrapperException, "obj argument NULL in typecheck");
+ }
+ return (PyObject_TypeCheck(obj, &rrtype_type));
+}
+
+const RRType&
+PyRRType_ToRRType(const PyObject* rrtype_obj) {
+ if (rrtype_obj == NULL) {
+ isc_throw(PyCPPWrapperException,
+ "obj argument NULL in RRType PyObject conversion");
}
- Py_INCREF(&rrtype_type);
- PyModule_AddObject(mod, "RRType",
- reinterpret_cast<PyObject*>(&rrtype_type));
-
- return (true);
+ const s_RRType* rrtype = static_cast<const s_RRType*>(rrtype_obj);
+ return (*rrtype->cppobj);
}
+
+
+} // end namespace python
+} // end namespace dns
+} // end namespace isc
diff --git a/src/lib/dns/python/rrtype_python.h b/src/lib/dns/python/rrtype_python.h
new file mode 100644
index 0000000..596598e
--- /dev/null
+++ b/src/lib/dns/python/rrtype_python.h
@@ -0,0 +1,68 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __PYTHON_RRTYPE_H
+#define __PYTHON_RRTYPE_H 1
+
+#include <Python.h>
+
+namespace isc {
+namespace dns {
+class RRType;
+
+namespace python {
+
+extern PyObject* po_InvalidRRType;
+extern PyObject* po_IncompleteRRType;
+
+extern PyTypeObject rrtype_type;
+
+/// This is a simple shortcut to create a python RRType object (in the
+/// form of a pointer to PyObject) with minimal exception safety.
+/// On success, it returns a valid pointer to PyObject with a reference
+/// counter of 1; if something goes wrong it throws an exception (it never
+/// returns a NULL pointer).
+/// This function is expected to be called within a try block
+/// followed by necessary setup for python exception.
+PyObject* createRRTypeObject(const RRType& source);
+
+/// \brief Checks if the given python object is a RRType object
+///
+/// \exception PyCPPWrapperException if obj is NULL
+///
+/// \param obj The object to check the type of
+/// \return true if the object is of type RRType, false otherwise
+bool PyRRType_Check(PyObject* obj);
+
+/// \brief Returns a reference to the RRType object contained within the given
+/// Python object.
+///
+/// \note The given object MUST be of type RRType; this can be checked with
+/// either the right call to ParseTuple("O!"), or with PyRRType_Check()
+///
+/// \note This is not a copy; if the RRType is needed when the PyObject
+/// may be destroyed, the caller must copy it itself.
+///
+/// \param rrtype_obj The rrtype object to convert
+const RRType& PyRRType_ToRRType(const PyObject* rrtype_obj);
+
+
+} // namespace python
+} // namespace dns
+} // namespace isc
+#endif // __PYTHON_RRTYPE_H
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/dns/python/tests/Makefile.am b/src/lib/dns/python/tests/Makefile.am
index 61d7df6..d1273f3 100644
--- a/src/lib/dns/python/tests/Makefile.am
+++ b/src/lib/dns/python/tests/Makefile.am
@@ -24,7 +24,7 @@ EXTRA_DIST += testutil.py
# required by loadable python modules.
LIBRARY_PATH_PLACEHOLDER =
if SET_ENV_LIBRARY_PATH
-LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$$$(ENV_LIBRARY_PATH)
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$$$(ENV_LIBRARY_PATH)
endif
# test using command-line arguments, so use check-local target instead of TESTS
diff --git a/src/lib/dns/python/tests/message_python_test.py b/src/lib/dns/python/tests/message_python_test.py
index c731253..8f2d732 100644
--- a/src/lib/dns/python/tests/message_python_test.py
+++ b/src/lib/dns/python/tests/message_python_test.py
@@ -29,9 +29,9 @@ if "TESTDATA_PATH" in os.environ:
else:
testdata_path = "../tests/testdata"
-def factoryFromFile(message, file):
+def factoryFromFile(message, file, parse_options=Message.PARSE_DEFAULT):
data = read_wire_data(file)
- message.from_wire(data)
+ message.from_wire(data, parse_options)
return data
# we don't have direct comparison for rrsets right now (should we?
@@ -466,6 +466,54 @@ test.example.com. 3600 IN A 192.0.2.2
self.assertEqual("192.0.2.2", rdata[1].to_text())
self.assertEqual(2, len(rdata))
+ def test_from_wire_short_buffer(self):
+ data = read_wire_data("message_fromWire22.wire")
+ self.assertRaises(DNSMessageFORMERR, self.p.from_wire, data[:-1])
+
+ def test_from_wire_combind_rrs(self):
+ factoryFromFile(self.p, "message_fromWire19.wire")
+ rrset = self.p.get_section(Message.SECTION_ANSWER)[0]
+ self.assertEqual(RRType("A"), rrset.get_type())
+ self.assertEqual(2, len(rrset.get_rdata()))
+
+ rrset = self.p.get_section(Message.SECTION_ANSWER)[1]
+ self.assertEqual(RRType("AAAA"), rrset.get_type())
+ self.assertEqual(1, len(rrset.get_rdata()))
+
+ def check_preserve_rrs(self, message, section):
+ rrset = message.get_section(section)[0]
+ self.assertEqual(RRType("A"), rrset.get_type())
+ rdata = rrset.get_rdata()
+ self.assertEqual(1, len(rdata))
+ self.assertEqual('192.0.2.1', rdata[0].to_text())
+
+ rrset = message.get_section(section)[1]
+ self.assertEqual(RRType("AAAA"), rrset.get_type())
+ rdata = rrset.get_rdata()
+ self.assertEqual(1, len(rdata))
+ self.assertEqual('2001:db8::1', rdata[0].to_text())
+
+ rrset = message.get_section(section)[2]
+ self.assertEqual(RRType("A"), rrset.get_type())
+ rdata = rrset.get_rdata()
+ self.assertEqual(1, len(rdata))
+ self.assertEqual('192.0.2.2', rdata[0].to_text())
+
+ def test_from_wire_preserve_answer(self):
+ factoryFromFile(self.p, "message_fromWire19.wire",
+ Message.PRESERVE_ORDER)
+ self.check_preserve_rrs(self.p, Message.SECTION_ANSWER)
+
+ def test_from_wire_preserve_authority(self):
+ factoryFromFile(self.p, "message_fromWire20.wire",
+ Message.PRESERVE_ORDER)
+ self.check_preserve_rrs(self.p, Message.SECTION_AUTHORITY)
+
+ def test_from_wire_preserve_additional(self):
+ factoryFromFile(self.p, "message_fromWire21.wire",
+ Message.PRESERVE_ORDER)
+ self.check_preserve_rrs(self.p, Message.SECTION_ADDITIONAL)
+
def test_EDNS0ExtCode(self):
# Extended Rcode = BADVERS
message_parse = Message(Message.PARSE)
diff --git a/src/lib/dns/python/tsig_python.cc b/src/lib/dns/python/tsig_python.cc
index db93a08..0764e33 100644
--- a/src/lib/dns/python/tsig_python.cc
+++ b/src/lib/dns/python/tsig_python.cc
@@ -37,23 +37,18 @@ using namespace isc::util::python;
using namespace isc::dns;
using namespace isc::dns::python;
-//
-// Definition of the classes
-//
-
// For each class, we need a struct, a helper functions (init, destroy,
// and static wrappers around the methods we export), a list of methods,
// and a type description
-//
-// TSIGContext
-//
-
-// Trivial constructor.
-s_TSIGContext::s_TSIGContext() : cppobj(NULL) {
-}
-
namespace {
+// The s_* Class simply covers one instantiation of the object
+class s_TSIGContext : public PyObject {
+public:
+ s_TSIGContext() : cppobj(NULL) {};
+ TSIGContext* cppobj;
+};
+
// Shortcut type which would be convenient for adding class variables safely.
typedef CPPPyObjectContainer<s_TSIGContext, TSIGContext> TSIGContextContainer;
@@ -101,23 +96,23 @@ int
TSIGContext_init(s_TSIGContext* self, PyObject* args) {
try {
// "From key" constructor
- const s_TSIGKey* tsigkey_obj;
+ const PyObject* tsigkey_obj;
if (PyArg_ParseTuple(args, "O!", &tsigkey_type, &tsigkey_obj)) {
- self->cppobj = new TSIGContext(*tsigkey_obj->cppobj);
+ self->cppobj = new TSIGContext(PyTSIGKey_ToTSIGKey(tsigkey_obj));
return (0);
}
// "From key param + keyring" constructor
PyErr_Clear();
- const s_Name* keyname_obj;
- const s_Name* algname_obj;
- const s_TSIGKeyRing* keyring_obj;
+ const PyObject* keyname_obj;
+ const PyObject* algname_obj;
+ const PyObject* keyring_obj;
if (PyArg_ParseTuple(args, "O!O!O!", &name_type, &keyname_obj,
&name_type, &algname_obj, &tsigkeyring_type,
&keyring_obj)) {
- self->cppobj = new TSIGContext(*keyname_obj->cppobj,
- *algname_obj->cppobj,
- *keyring_obj->cppobj);
+ self->cppobj = new TSIGContext(PyName_ToName(keyname_obj),
+ PyName_ToName(algname_obj),
+ PyTSIGKeyRing_ToTSIGKeyRing(keyring_obj));
return (0);
}
} catch (const exception& ex) {
@@ -153,7 +148,7 @@ PyObject*
TSIGContext_getError(s_TSIGContext* self) {
try {
PyObjectContainer container(createTSIGErrorObject(
- self->cppobj->getError()));
+ self->cppobj->getError()));
return (Py_BuildValue("O", container.get()));
} catch (const exception& ex) {
const string ex_what =
@@ -205,13 +200,13 @@ PyObject*
TSIGContext_verify(s_TSIGContext* self, PyObject* args) {
const char* data;
Py_ssize_t data_len;
- s_TSIGRecord* py_record;
+ PyObject* py_record;
PyObject* py_maybe_none;
- TSIGRecord* record;
+ const TSIGRecord* record;
if (PyArg_ParseTuple(args, "O!y#", &tsigrecord_type, &py_record,
&data, &data_len)) {
- record = py_record->cppobj;
+ record = &PyTSIGRecord_ToTSIGRecord(py_record);
} else if (PyArg_ParseTuple(args, "Oy#", &py_maybe_none, &data,
&data_len)) {
record = NULL;
@@ -264,7 +259,7 @@ PyTypeObject tsigcontext_type = {
NULL, // tp_as_number
NULL, // tp_as_sequence
NULL, // tp_as_mapping
- NULL, // tp_hash
+ NULL, // tp_hash
NULL, // tp_call
NULL, // tp_str
NULL, // tp_getattro
@@ -307,58 +302,24 @@ PyTypeObject tsigcontext_type = {
0 // tp_version_tag
};
-// Module Initialization, all statics are initialized here
bool
-initModulePart_TSIGContext(PyObject* mod) {
- // We initialize the static description object with PyType_Ready(),
- // then add it to the module. This is not just a check! (leaving
- // this out results in segmentation faults)
- if (PyType_Ready(&tsigcontext_type) < 0) {
- return (false);
+PyTSIGContext_Check(PyObject* obj) {
+ if (obj == NULL) {
+ isc_throw(PyCPPWrapperException, "obj argument NULL in typecheck");
}
- void* p = &tsigcontext_type;
- if (PyModule_AddObject(mod, "TSIGContext",
- static_cast<PyObject*>(p)) < 0) {
- return (false);
- }
- Py_INCREF(&tsigcontext_type);
+ return (PyObject_TypeCheck(obj, &tsigcontext_type));
+}
- try {
- // Class specific exceptions
- po_TSIGContextError = PyErr_NewException("pydnspp.TSIGContextError",
- po_IscException, NULL);
- PyObjectContainer(po_TSIGContextError).installToModule(
- mod, "TSIGContextError");
-
- // Constant class variables
- installClassVariable(tsigcontext_type, "STATE_INIT",
- Py_BuildValue("I", TSIGContext::INIT));
- installClassVariable(tsigcontext_type, "STATE_SENT_REQUEST",
- Py_BuildValue("I", TSIGContext::SENT_REQUEST));
- installClassVariable(tsigcontext_type, "STATE_RECEIVED_REQUEST",
- Py_BuildValue("I", TSIGContext::RECEIVED_REQUEST));
- installClassVariable(tsigcontext_type, "STATE_SENT_RESPONSE",
- Py_BuildValue("I", TSIGContext::SENT_RESPONSE));
- installClassVariable(tsigcontext_type, "STATE_VERIFIED_RESPONSE",
- Py_BuildValue("I",
- TSIGContext::VERIFIED_RESPONSE));
-
- installClassVariable(tsigcontext_type, "DEFAULT_FUDGE",
- Py_BuildValue("H", TSIGContext::DEFAULT_FUDGE));
- } catch (const exception& ex) {
- const string ex_what =
- "Unexpected failure in TSIGContext initialization: " +
- string(ex.what());
- PyErr_SetString(po_IscException, ex_what.c_str());
- return (false);
- } catch (...) {
- PyErr_SetString(PyExc_SystemError,
- "Unexpected failure in TSIGContext initialization");
- return (false);
+TSIGContext&
+PyTSIGContext_ToTSIGContext(PyObject* tsigcontext_obj) {
+ if (tsigcontext_obj == NULL) {
+ isc_throw(PyCPPWrapperException,
+ "obj argument NULL in TSIGContext PyObject conversion");
}
-
- return (true);
+ s_TSIGContext* tsigcontext = static_cast<s_TSIGContext*>(tsigcontext_obj);
+ return (*tsigcontext->cppobj);
}
+
} // namespace python
} // namespace dns
} // namespace isc
diff --git a/src/lib/dns/python/tsig_python.h b/src/lib/dns/python/tsig_python.h
index f9b4f7b..e4e9fff 100644
--- a/src/lib/dns/python/tsig_python.h
+++ b/src/lib/dns/python/tsig_python.h
@@ -23,19 +23,31 @@ class TSIGContext;
namespace python {
-// The s_* Class simply covers one instantiation of the object
-class s_TSIGContext : public PyObject {
-public:
- s_TSIGContext();
- TSIGContext* cppobj;
-};
-
extern PyTypeObject tsigcontext_type;
// Class specific exceptions
extern PyObject* po_TSIGContextError;
-bool initModulePart_TSIGContext(PyObject* mod);
+/// \brief Checks if the given python object is a TSIGContext object
+///
+/// \exception PyCPPWrapperException if obj is NULL
+///
+/// \param obj The object to check the type of
+/// \return true if the object is of type TSIGContext, false otherwise
+bool PyTSIGContext_Check(PyObject* obj);
+
+/// \brief Returns a reference to the TSIGContext object contained within the given
+/// Python object.
+///
+/// \note The given object MUST be of type TSIGContext; this can be checked with
+/// either the right call to ParseTuple("O!"), or with PyTSIGContext_Check()
+///
+/// \note This is not a copy; if the TSIGContext is needed when the PyObject
+/// may be destroyed, the caller must copy it itself.
+///
+/// \param tsigcontext_obj The tsigcontext object to convert
+TSIGContext& PyTSIGContext_ToTSIGContext(PyObject* tsigcontext_obj);
+
} // namespace python
} // namespace dns
diff --git a/src/lib/dns/python/tsig_rdata_python.cc b/src/lib/dns/python/tsig_rdata_python.cc
index 4e4f287..6ec0f09 100644
--- a/src/lib/dns/python/tsig_rdata_python.cc
+++ b/src/lib/dns/python/tsig_rdata_python.cc
@@ -12,6 +12,7 @@
// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
// PERFORMANCE OF THIS SOFTWARE.
+#define PY_SSIZE_T_CLEAN
#include <Python.h>
#include <string>
@@ -32,23 +33,19 @@ using namespace isc::dns;
using namespace isc::dns::rdata;
using namespace isc::dns::python;
-//
-// Definition of the classes
-//
-
// For each class, we need a struct, a helper functions (init, destroy,
// and static wrappers around the methods we export), a list of methods,
// and a type description
-//
-// TSIG RDATA
-//
+namespace {
+// The s_* Class simply covers one instantiation of the object
+class s_TSIG : public PyObject {
+public:
+ s_TSIG() : cppobj(NULL) {};
+ const rdata::any::TSIG* cppobj;
+};
-// Trivial constructor.
-s_TSIG::s_TSIG() : cppobj(NULL) {
-}
-namespace {
// Shortcut type which would be convenient for adding class variables safely.
typedef CPPPyObjectContainer<s_TSIG, any::TSIG> TSIGContainer;
@@ -235,7 +232,7 @@ TSIG_toWire(const s_TSIG* const self, PyObject* args) {
self, args));
}
-PyObject*
+PyObject*
TSIG_richcmp(const s_TSIG* const self,
const s_TSIG* const other,
const int op)
@@ -302,7 +299,7 @@ PyTypeObject tsig_type = {
NULL, // tp_as_number
NULL, // tp_as_sequence
NULL, // tp_as_mapping
- NULL, // tp_hash
+ NULL, // tp_hash
NULL, // tp_call
TSIG_str, // tp_str
NULL, // tp_getattro
@@ -340,30 +337,31 @@ PyTypeObject tsig_type = {
0 // tp_version_tag
};
-// Module Initialization, all statics are initialized here
-bool
-initModulePart_TSIG(PyObject* mod) {
- // We initialize the static description object with PyType_Ready(),
- // then add it to the module. This is not just a check! (leaving
- // this out results in segmentation faults)
- if (PyType_Ready(&tsig_type) < 0) {
- return (false);
- }
- void* p = &tsig_type;
- if (PyModule_AddObject(mod, "TSIG", static_cast<PyObject*>(p)) < 0) {
- return (false);
- }
- Py_INCREF(&tsig_type);
-
- return (true);
-}
-
PyObject*
createTSIGObject(const any::TSIG& source) {
- TSIGContainer container = PyObject_New(s_TSIG, &tsig_type);
+ TSIGContainer container(PyObject_New(s_TSIG, &tsig_type));
container.set(new any::TSIG(source));
return (container.release());
}
+
+bool
+PyTSIG_Check(PyObject* obj) {
+ if (obj == NULL) {
+ isc_throw(PyCPPWrapperException, "obj argument NULL in typecheck");
+ }
+ return (PyObject_TypeCheck(obj, &tsig_type));
+}
+
+const any::TSIG&
+PyTSIG_ToTSIG(const PyObject* tsig_obj) {
+ if (tsig_obj == NULL) {
+ isc_throw(PyCPPWrapperException,
+ "obj argument NULL in TSIG PyObject conversion");
+ }
+ const s_TSIG* tsig = static_cast<const s_TSIG*>(tsig_obj);
+ return (*tsig->cppobj);
+}
+
} // namespace python
} // namespace dns
} // namespace isc
diff --git a/src/lib/dns/python/tsig_rdata_python.h b/src/lib/dns/python/tsig_rdata_python.h
index e5e0c6c..a84d9e8 100644
--- a/src/lib/dns/python/tsig_rdata_python.h
+++ b/src/lib/dns/python/tsig_rdata_python.h
@@ -27,17 +27,8 @@ class TSIG;
namespace python {
-// The s_* Class simply covers one instantiation of the object
-class s_TSIG : public PyObject {
-public:
- s_TSIG();
- const rdata::any::TSIG* cppobj;
-};
-
extern PyTypeObject tsig_type;
-bool initModulePart_TSIG(PyObject* mod);
-
/// This is A simple shortcut to create a python TSIG object (in the
/// form of a pointer to PyObject) with minimal exception safety.
/// On success, it returns a valid pointer to PyObject with a reference
@@ -47,6 +38,26 @@ bool initModulePart_TSIG(PyObject* mod);
/// followed by necessary setup for python exception.
PyObject* createTSIGObject(const rdata::any::TSIG& source);
+/// \brief Checks if the given python object is a TSIG object
+///
+/// \exception PyCPPWrapperException if obj is NULL
+///
+/// \param obj The object to check the type of
+/// \return true if the object is of type TSIG, false otherwise
+bool PyTSIG_Check(PyObject* obj);
+
+/// \brief Returns a reference to the TSIG object contained within the given
+/// Python object.
+///
+/// \note The given object MUST be of type TSIG; this can be checked with
+/// either the right call to ParseTuple("O!"), or with PyTSIG_Check()
+///
+/// \note This is not a copy; if the TSIG is needed when the PyObject
+/// may be destroyed, the caller must copy it itself.
+///
+/// \param tsig_obj The tsig object to convert
+const rdata::any::TSIG& PyTSIG_ToTSIG(const PyObject* tsig_obj);
+
} // namespace python
} // namespace dns
} // namespace isc
diff --git a/src/lib/dns/python/tsigerror_python.cc b/src/lib/dns/python/tsigerror_python.cc
index 0ad4716..7a0217e 100644
--- a/src/lib/dns/python/tsigerror_python.cc
+++ b/src/lib/dns/python/tsigerror_python.cc
@@ -30,26 +30,21 @@ using namespace isc::util::python;
using namespace isc::dns;
using namespace isc::dns::python;
-//
-// Definition of the classes
-//
-
// For each class, we need a struct, a helper functions (init, destroy,
// and static wrappers around the methods we export), a list of methods,
// and a type description
-//
-// TSIGError
-//
-
-// Trivial constructor.
-s_TSIGError::s_TSIGError() : cppobj(NULL) {
-}
-
// Import pydoc text
#include "tsigerror_python_inc.cc"
namespace {
+// The s_* Class simply covers one instantiation of the object
+class s_TSIGError : public PyObject {
+public:
+ s_TSIGError() : cppobj(NULL) {};
+ const TSIGError* cppobj;
+};
+
// Shortcut type which would be convenient for adding class variables safely.
typedef CPPPyObjectContainer<s_TSIGError, TSIGError> TSIGErrorContainer;
@@ -107,9 +102,9 @@ TSIGError_init(s_TSIGError* self, PyObject* args) {
// Constructor from Rcode
PyErr_Clear();
- s_Rcode* py_rcode;
+ PyObject* py_rcode;
if (PyArg_ParseTuple(args, "O!", &rcode_type, &py_rcode)) {
- self->cppobj = new TSIGError(*py_rcode->cppobj);
+ self->cppobj = new TSIGError(PyRcode_ToRcode(py_rcode));
return (0);
}
} catch (const isc::OutOfRange& ex) {
@@ -172,13 +167,8 @@ TSIGError_str(PyObject* self) {
PyObject*
TSIGError_toRcode(const s_TSIGError* const self) {
- typedef CPPPyObjectContainer<s_Rcode, Rcode> RcodePyObjectContainer;
-
try {
- RcodePyObjectContainer rcode_container(PyObject_New(s_Rcode,
- &rcode_type));
- rcode_container.set(new Rcode(self->cppobj->toRcode()));
- return (rcode_container.release());
+ return (createRcodeObject(self->cppobj->toRcode()));
} catch (const exception& ex) {
const string ex_what =
"Failed to convert TSIGError to Rcode: " + string(ex.what());
@@ -190,7 +180,7 @@ TSIGError_toRcode(const s_TSIGError* const self) {
return (NULL);
}
-PyObject*
+PyObject*
TSIGError_richcmp(const s_TSIGError* const self,
const s_TSIGError* const other,
const int op)
@@ -252,7 +242,7 @@ PyTypeObject tsigerror_type = {
NULL, // tp_as_number
NULL, // tp_as_sequence
NULL, // tp_as_mapping
- NULL, // tp_hash
+ NULL, // tp_hash
NULL, // tp_call
// THIS MAY HAVE TO BE CHANGED TO NULL:
TSIGError_str, // tp_str
@@ -290,78 +280,9 @@ PyTypeObject tsigerror_type = {
0 // tp_version_tag
};
-namespace {
-// Trivial shortcut to create and install TSIGError constants.
-inline void
-installTSIGErrorConstant(const char* name, const TSIGError& val) {
- TSIGErrorContainer container(PyObject_New(s_TSIGError, &tsigerror_type));
- container.installAsClassVariable(tsigerror_type, name, new TSIGError(val));
-}
-}
-
-// Module Initialization, all statics are initialized here
-bool
-initModulePart_TSIGError(PyObject* mod) {
- // We initialize the static description object with PyType_Ready(),
- // then add it to the module. This is not just a check! (leaving
- // this out results in segmentation faults)
- if (PyType_Ready(&tsigerror_type) < 0) {
- return (false);
- }
- void* p = &tsigerror_type;
- if (PyModule_AddObject(mod, "TSIGError", static_cast<PyObject*>(p)) < 0) {
- return (false);
- }
- Py_INCREF(&tsigerror_type);
-
- try {
- // Constant class variables
- // Error codes (bare values)
- installClassVariable(tsigerror_type, "BAD_SIG_CODE",
- Py_BuildValue("H", TSIGError::BAD_SIG_CODE));
- installClassVariable(tsigerror_type, "BAD_KEY_CODE",
- Py_BuildValue("H", TSIGError::BAD_KEY_CODE));
- installClassVariable(tsigerror_type, "BAD_TIME_CODE",
- Py_BuildValue("H", TSIGError::BAD_TIME_CODE));
-
- // Error codes (constant objects)
- installTSIGErrorConstant("NOERROR", TSIGError::NOERROR());
- installTSIGErrorConstant("FORMERR", TSIGError::FORMERR());
- installTSIGErrorConstant("SERVFAIL", TSIGError::SERVFAIL());
- installTSIGErrorConstant("NXDOMAIN", TSIGError::NXDOMAIN());
- installTSIGErrorConstant("NOTIMP", TSIGError::NOTIMP());
- installTSIGErrorConstant("REFUSED", TSIGError::REFUSED());
- installTSIGErrorConstant("YXDOMAIN", TSIGError::YXDOMAIN());
- installTSIGErrorConstant("YXRRSET", TSIGError::YXRRSET());
- installTSIGErrorConstant("NXRRSET", TSIGError::NXRRSET());
- installTSIGErrorConstant("NOTAUTH", TSIGError::NOTAUTH());
- installTSIGErrorConstant("NOTZONE", TSIGError::NOTZONE());
- installTSIGErrorConstant("RESERVED11", TSIGError::RESERVED11());
- installTSIGErrorConstant("RESERVED12", TSIGError::RESERVED12());
- installTSIGErrorConstant("RESERVED13", TSIGError::RESERVED13());
- installTSIGErrorConstant("RESERVED14", TSIGError::RESERVED14());
- installTSIGErrorConstant("RESERVED15", TSIGError::RESERVED15());
- installTSIGErrorConstant("BAD_SIG", TSIGError::BAD_SIG());
- installTSIGErrorConstant("BAD_KEY", TSIGError::BAD_KEY());
- installTSIGErrorConstant("BAD_TIME", TSIGError::BAD_TIME());
- } catch (const exception& ex) {
- const string ex_what =
- "Unexpected failure in TSIGError initialization: " +
- string(ex.what());
- PyErr_SetString(po_IscException, ex_what.c_str());
- return (false);
- } catch (...) {
- PyErr_SetString(PyExc_SystemError,
- "Unexpected failure in TSIGError initialization");
- return (false);
- }
-
- return (true);
-}
-
PyObject*
createTSIGErrorObject(const TSIGError& source) {
- TSIGErrorContainer container = PyObject_New(s_TSIGError, &tsigerror_type);
+ TSIGErrorContainer container(PyObject_New(s_TSIGError, &tsigerror_type));
container.set(new TSIGError(source));
return (container.release());
}
diff --git a/src/lib/dns/python/tsigerror_python.h b/src/lib/dns/python/tsigerror_python.h
index 735a480..0b5b630 100644
--- a/src/lib/dns/python/tsigerror_python.h
+++ b/src/lib/dns/python/tsigerror_python.h
@@ -23,17 +23,8 @@ class TSIGError;
namespace python {
-// The s_* Class simply covers one instantiation of the object
-class s_TSIGError : public PyObject {
-public:
- s_TSIGError();
- const TSIGError* cppobj;
-};
-
extern PyTypeObject tsigerror_type;
-bool initModulePart_TSIGError(PyObject* mod);
-
/// This is A simple shortcut to create a python TSIGError object (in the
/// form of a pointer to PyObject) with minimal exception safety.
/// On success, it returns a valid pointer to PyObject with a reference
@@ -42,6 +33,7 @@ bool initModulePart_TSIGError(PyObject* mod);
/// This function is expected to be called with in a try block
/// followed by necessary setup for python exception.
PyObject* createTSIGErrorObject(const TSIGError& source);
+
} // namespace python
} // namespace dns
} // namespace isc
diff --git a/src/lib/dns/python/tsigkey_python.cc b/src/lib/dns/python/tsigkey_python.cc
index f0906cb..cf79c1a 100644
--- a/src/lib/dns/python/tsigkey_python.cc
+++ b/src/lib/dns/python/tsigkey_python.cc
@@ -31,10 +31,6 @@ using namespace isc::util::python;
using namespace isc::dns;
using namespace isc::dns::python;
-//
-// Definition of the classes
-//
-
// For each class, we need a struct, a helper functions (init, destroy,
// and static wrappers around the methods we export), a list of methods,
// and a type description
@@ -43,11 +39,14 @@ using namespace isc::dns::python;
// TSIGKey
//
+namespace {
// The s_* Class simply covers one instantiation of the object
+class s_TSIGKey : public PyObject {
+public:
+ s_TSIGKey() : cppobj(NULL) {};
+ TSIGKey* cppobj;
+};
-s_TSIGKey::s_TSIGKey() : cppobj(NULL) {}
-
-namespace {
//
// We declare the functions here, the definitions are below
// the type definition of the object, since both can use the other
@@ -96,8 +95,8 @@ TSIGKey_init(s_TSIGKey* self, PyObject* args) {
}
PyErr_Clear();
- const s_Name* key_name;
- const s_Name* algorithm_name;
+ const PyObject* key_name;
+ const PyObject* algorithm_name;
PyObject* bytes_obj;
const char* secret;
Py_ssize_t secret_len;
@@ -107,8 +106,8 @@ TSIGKey_init(s_TSIGKey* self, PyObject* args) {
if (secret_len == 0) {
secret = NULL;
}
- self->cppobj = new TSIGKey(*key_name->cppobj,
- *algorithm_name->cppobj,
+ self->cppobj = new TSIGKey(PyName_ToName(key_name),
+ PyName_ToName(algorithm_name),
secret, secret_len);
return (0);
}
@@ -196,7 +195,7 @@ PyTypeObject tsigkey_type = {
NULL, // tp_as_number
NULL, // tp_as_sequence
NULL, // tp_as_mapping
- NULL, // tp_hash
+ NULL, // tp_hash
NULL, // tp_call
NULL, // tp_str
NULL, // tp_getattro
@@ -233,49 +232,20 @@ PyTypeObject tsigkey_type = {
0 // tp_version_tag
};
-// Module Initialization, all statics are initialized here
bool
-initModulePart_TSIGKey(PyObject* mod) {
- // We initialize the static description object with PyType_Ready(),
- // then add it to the module. This is not just a check! (leaving
- // this out results in segmentation faults)
- if (PyType_Ready(&tsigkey_type) < 0) {
- return (false);
- }
- void* p = &tsigkey_type;
- if (PyModule_AddObject(mod, "TSIGKey", static_cast<PyObject*>(p)) != 0) {
- return (false);
- }
- Py_INCREF(&tsigkey_type);
-
- try {
- // Constant class variables
- installClassVariable(tsigkey_type, "HMACMD5_NAME",
- createNameObject(TSIGKey::HMACMD5_NAME()));
- installClassVariable(tsigkey_type, "HMACSHA1_NAME",
- createNameObject(TSIGKey::HMACSHA1_NAME()));
- installClassVariable(tsigkey_type, "HMACSHA256_NAME",
- createNameObject(TSIGKey::HMACSHA256_NAME()));
- installClassVariable(tsigkey_type, "HMACSHA224_NAME",
- createNameObject(TSIGKey::HMACSHA224_NAME()));
- installClassVariable(tsigkey_type, "HMACSHA384_NAME",
- createNameObject(TSIGKey::HMACSHA384_NAME()));
- installClassVariable(tsigkey_type, "HMACSHA512_NAME",
- createNameObject(TSIGKey::HMACSHA512_NAME()));
- } catch (const exception& ex) {
- const string ex_what =
- "Unexpected failure in TSIGKey initialization: " +
- string(ex.what());
- PyErr_SetString(po_IscException, ex_what.c_str());
- return (false);
- } catch (...) {
- PyErr_SetString(PyExc_SystemError,
- "Unexpected failure in TSIGKey initialization");
- return (false);
+PyTSIGKey_Check(PyObject* obj) {
+ if (obj == NULL) {
+ isc_throw(PyCPPWrapperException, "obj argument NULL in typecheck");
}
+ return (PyObject_TypeCheck(obj, &tsigkey_type));
+}
- return (true);
+const TSIGKey&
+PyTSIGKey_ToTSIGKey(const PyObject* tsigkey_obj) {
+ const s_TSIGKey* tsigkey = static_cast<const s_TSIGKey*>(tsigkey_obj);
+ return (*tsigkey->cppobj);
}
+
} // namespace python
} // namespace dns
} // namespace isc
@@ -287,13 +257,14 @@ initModulePart_TSIGKey(PyObject* mod) {
// TSIGKeyRing
//
+namespace {
// The s_* Class simply covers one instantiation of the object
+class s_TSIGKeyRing : public PyObject {
+public:
+ s_TSIGKeyRing() : cppobj(NULL) {};
+ TSIGKeyRing* cppobj;
+};
-// The s_* Class simply covers one instantiation of the object
-
-s_TSIGKeyRing::s_TSIGKeyRing() : cppobj(NULL) {}
-
-namespace {
//
// We declare the functions here, the definitions are below
// the type definition of the object, since both can use the other
@@ -329,7 +300,7 @@ TSIGKeyRing_init(s_TSIGKeyRing* self, PyObject* args) {
"Invalid arguments to TSIGKeyRing constructor");
return (-1);
}
-
+
self->cppobj = new(nothrow) TSIGKeyRing();
if (self->cppobj == NULL) {
PyErr_SetString(po_IscException, "Allocating TSIGKeyRing failed");
@@ -354,7 +325,7 @@ TSIGKeyRing_size(const s_TSIGKeyRing* const self) {
PyObject*
TSIGKeyRing_add(const s_TSIGKeyRing* const self, PyObject* args) {
s_TSIGKey* tsigkey;
-
+
if (PyArg_ParseTuple(args, "O!", &tsigkey_type, &tsigkey)) {
try {
const TSIGKeyRing::Result result =
@@ -374,11 +345,11 @@ TSIGKeyRing_add(const s_TSIGKeyRing* const self, PyObject* args) {
PyObject*
TSIGKeyRing_remove(const s_TSIGKeyRing* self, PyObject* args) {
- s_Name* key_name;
+ PyObject* key_name;
if (PyArg_ParseTuple(args, "O!", &name_type, &key_name)) {
const TSIGKeyRing::Result result =
- self->cppobj->remove(*key_name->cppobj);
+ self->cppobj->remove(PyName_ToName(key_name));
return (Py_BuildValue("I", result));
}
@@ -390,13 +361,14 @@ TSIGKeyRing_remove(const s_TSIGKeyRing* self, PyObject* args) {
PyObject*
TSIGKeyRing_find(const s_TSIGKeyRing* self, PyObject* args) {
- s_Name* key_name;
- s_Name* algorithm_name;
+ PyObject* key_name;
+ PyObject* algorithm_name;
if (PyArg_ParseTuple(args, "O!O!", &name_type, &key_name,
&name_type, &algorithm_name)) {
const TSIGKeyRing::FindResult result =
- self->cppobj->find(*key_name->cppobj, *algorithm_name->cppobj);
+ self->cppobj->find(PyName_ToName(key_name),
+ PyName_ToName(algorithm_name));
if (result.key != NULL) {
s_TSIGKey* key = PyObject_New(s_TSIGKey, &tsigkey_type);
if (key == NULL) {
@@ -436,7 +408,7 @@ PyTypeObject tsigkeyring_type = {
NULL, // tp_as_number
NULL, // tp_as_sequence
NULL, // tp_as_mapping
- NULL, // tp_hash
+ NULL, // tp_hash
NULL, // tp_call
NULL, // tp_str
NULL, // tp_getattro
@@ -473,27 +445,24 @@ PyTypeObject tsigkeyring_type = {
};
bool
-initModulePart_TSIGKeyRing(PyObject* mod) {
- if (PyType_Ready(&tsigkeyring_type) < 0) {
- return (false);
+PyTSIGKeyRing_Check(PyObject* obj) {
+ if (obj == NULL) {
+ isc_throw(PyCPPWrapperException, "obj argument NULL in typecheck");
}
- Py_INCREF(&tsigkeyring_type);
- void* p = &tsigkeyring_type;
- if (PyModule_AddObject(mod, "TSIGKeyRing",
- static_cast<PyObject*>(p)) != 0) {
- Py_DECREF(&tsigkeyring_type);
- return (false);
- }
-
- addClassVariable(tsigkeyring_type, "SUCCESS",
- Py_BuildValue("I", TSIGKeyRing::SUCCESS));
- addClassVariable(tsigkeyring_type, "EXIST",
- Py_BuildValue("I", TSIGKeyRing::EXIST));
- addClassVariable(tsigkeyring_type, "NOTFOUND",
- Py_BuildValue("I", TSIGKeyRing::NOTFOUND));
+ return (PyObject_TypeCheck(obj, &tsigkeyring_type));
+}
- return (true);
+const TSIGKeyRing&
+PyTSIGKeyRing_ToTSIGKeyRing(const PyObject* tsigkeyring_obj) {
+ if (tsigkeyring_obj == NULL) {
+ isc_throw(PyCPPWrapperException,
+ "obj argument NULL in TSIGKeyRing PyObject conversion");
+ }
+ const s_TSIGKeyRing* tsigkeyring =
+ static_cast<const s_TSIGKeyRing*>(tsigkeyring_obj);
+ return (*tsigkeyring->cppobj);
}
+
} // namespace python
} // namespace dns
} // namespace isc
diff --git a/src/lib/dns/python/tsigkey_python.h b/src/lib/dns/python/tsigkey_python.h
index 51b3ae7..6c3d2e3 100644
--- a/src/lib/dns/python/tsigkey_python.h
+++ b/src/lib/dns/python/tsigkey_python.h
@@ -24,24 +24,46 @@ class TSIGKeyRing;
namespace python {
-// The s_* Class simply covers one instantiation of the object
-class s_TSIGKey : public PyObject {
-public:
- s_TSIGKey();
- TSIGKey* cppobj;
-};
-
-class s_TSIGKeyRing : public PyObject {
-public:
- s_TSIGKeyRing();
- TSIGKeyRing* cppobj;
-};
-
extern PyTypeObject tsigkey_type;
extern PyTypeObject tsigkeyring_type;
-bool initModulePart_TSIGKey(PyObject* mod);
-bool initModulePart_TSIGKeyRing(PyObject* mod);
+/// \brief Checks if the given python object is a TSIGKey object
+///
+/// \exception PyCPPWrapperException if obj is NULL
+///
+/// \param obj The object to check the type of
+/// \return true if the object is of type TSIGKey, false otherwise
+bool PyTSIGKey_Check(PyObject* obj);
+
+/// \brief Returns a reference to the TSIGKey object contained within the given
+/// Python object.
+///
+/// \note The given object MUST be of type TSIGKey; this can be checked with
+/// either the right call to ParseTuple("O!"), or with PyTSIGKey_Check()
+///
+/// \note This is not a copy; if the TSIGKey is needed when the PyObject
+/// may be destroyed, the caller must copy it itself.
+///
+/// \param tsigkey_obj The tsigkey object to convert
+const TSIGKey& PyTSIGKey_ToTSIGKey(const PyObject* tsigkey_obj);
+
+/// \brief Checks if the given python object is a TSIGKeyRing object
+///
+/// \param obj The object to check the type of
+/// \return true if the object is of type TSIGKeyRing, false otherwise
+bool PyTSIGKeyRing_Check(PyObject* obj);
+
+/// \brief Returns a reference to the TSIGKeyRing object contained within the given
+/// Python object.
+///
+/// \note The given object MUST be of type TSIGKeyRing; this can be checked with
+/// either the right call to ParseTuple("O!"), or with PyTSIGKeyRing_Check()
+///
+/// \note This is not a copy; if the TSIGKeyRing is needed when the PyObject
+/// may be destroyed, the caller must copy it itself.
+///
+/// \param tsigkeyring_obj The tsigkeyring object to convert
+const TSIGKeyRing& PyTSIGKeyRing_ToTSIGKeyRing(const PyObject* tsigkeyring_obj);
} // namespace python
} // namespace dns
diff --git a/src/lib/dns/python/tsigrecord_python.cc b/src/lib/dns/python/tsigrecord_python.cc
index 8a78b5e..c754dd2 100644
--- a/src/lib/dns/python/tsigrecord_python.cc
+++ b/src/lib/dns/python/tsigrecord_python.cc
@@ -32,10 +32,6 @@ using namespace isc::util::python;
using namespace isc::dns;
using namespace isc::dns::python;
-//
-// Definition of the classes
-//
-
// For each class, we need a struct, a helper functions (init, destroy,
// and static wrappers around the methods we export), a list of methods,
// and a type description
@@ -44,11 +40,14 @@ using namespace isc::dns::python;
// TSIGRecord
//
-// Trivial constructor.
-s_TSIGRecord::s_TSIGRecord() : cppobj(NULL) {
-}
-
namespace {
+// The s_* Class simply covers one instantiation of the object
+class s_TSIGRecord : public PyObject {
+public:
+ s_TSIGRecord() : cppobj(NULL) {};
+ TSIGRecord* cppobj;
+};
+
// Shortcut type which would be convenient for adding class variables safely.
typedef CPPPyObjectContainer<s_TSIGRecord, TSIGRecord> TSIGRecordContainer;
@@ -102,11 +101,12 @@ PyMethodDef TSIGRecord_methods[] = {
int
TSIGRecord_init(s_TSIGRecord* self, PyObject* args) {
try {
- const s_Name* py_name;
- const s_TSIG* py_tsig;
+ const PyObject* py_name;
+ const PyObject* py_tsig;
if (PyArg_ParseTuple(args, "O!O!", &name_type, &py_name,
&tsig_type, &py_tsig)) {
- self->cppobj = new TSIGRecord(*py_name->cppobj, *py_tsig->cppobj);
+ self->cppobj = new TSIGRecord(PyName_ToName(py_name),
+ PyTSIG_ToTSIG(py_tsig));
return (0);
}
} catch (const exception& ex) {
@@ -226,7 +226,7 @@ PyTypeObject tsigrecord_type = {
NULL, // tp_as_number
NULL, // tp_as_sequence
NULL, // tp_as_mapping
- NULL, // tp_hash
+ NULL, // tp_hash
NULL, // tp_call
TSIGRecord_str, // tp_str
NULL, // tp_getattro
@@ -262,50 +262,32 @@ PyTypeObject tsigrecord_type = {
0 // tp_version_tag
};
-// Module Initialization, all statics are initialized here
+PyObject*
+createTSIGRecordObject(const TSIGRecord& source) {
+ TSIGRecordContainer container(PyObject_New(s_TSIGRecord, &tsigrecord_type));
+ container.set(new TSIGRecord(source));
+ return (container.release());
+}
+
bool
-initModulePart_TSIGRecord(PyObject* mod) {
- // We initialize the static description object with PyType_Ready(),
- // then add it to the module. This is not just a check! (leaving
- // this out results in segmentation faults)
- if (PyType_Ready(&tsigrecord_type) < 0) {
- return (false);
- }
- void* p = &tsigrecord_type;
- if (PyModule_AddObject(mod, "TSIGRecord", static_cast<PyObject*>(p)) < 0) {
- return (false);
+PyTSIGRecord_Check(PyObject* obj) {
+ if (obj == NULL) {
+ isc_throw(PyCPPWrapperException, "obj argument NULL in typecheck");
}
- Py_INCREF(&tsigrecord_type);
+ return (PyObject_TypeCheck(obj, &tsigrecord_type));
+}
- // The following template is the typical procedure for installing class
- // variables. If the class doesn't have a class variable, remove the
- // entire try-catch clauses.
- try {
- // Constant class variables
- installClassVariable(tsigrecord_type, "TSIG_TTL",
- Py_BuildValue("I", 0));
- } catch (const exception& ex) {
- const string ex_what =
- "Unexpected failure in TSIGRecord initialization: " +
- string(ex.what());
- PyErr_SetString(po_IscException, ex_what.c_str());
- return (false);
- } catch (...) {
- PyErr_SetString(PyExc_SystemError,
- "Unexpected failure in TSIGRecord initialization");
- return (false);
+const TSIGRecord&
+PyTSIGRecord_ToTSIGRecord(PyObject* tsigrecord_obj) {
+ if (tsigrecord_obj == NULL) {
+ isc_throw(PyCPPWrapperException,
+ "obj argument NULL in TSIGRecord PyObject conversion");
}
-
- return (true);
+ s_TSIGRecord* tsigrecord = static_cast<s_TSIGRecord*>(tsigrecord_obj);
+ return (*tsigrecord->cppobj);
}
-PyObject*
-createTSIGRecordObject(const TSIGRecord& source) {
- TSIGRecordContainer container = PyObject_New(s_TSIGRecord,
- &tsigrecord_type);
- container.set(new TSIGRecord(source));
- return (container.release());
-}
+
} // namespace python
} // namespace dns
} // namespace isc
diff --git a/src/lib/dns/python/tsigrecord_python.h b/src/lib/dns/python/tsigrecord_python.h
index e0a3526..d6252e1 100644
--- a/src/lib/dns/python/tsigrecord_python.h
+++ b/src/lib/dns/python/tsigrecord_python.h
@@ -23,17 +23,9 @@ class TSIGRecord;
namespace python {
-// The s_* Class simply covers one instantiation of the object
-class s_TSIGRecord : public PyObject {
-public:
- s_TSIGRecord();
- TSIGRecord* cppobj;
-};
extern PyTypeObject tsigrecord_type;
-bool initModulePart_TSIGRecord(PyObject* mod);
-
/// This is A simple shortcut to create a python TSIGRecord object (in the
/// form of a pointer to PyObject) with minimal exception safety.
/// On success, it returns a valid pointer to PyObject with a reference
@@ -43,6 +35,26 @@ bool initModulePart_TSIGRecord(PyObject* mod);
/// followed by necessary setup for python exception.
PyObject* createTSIGRecordObject(const TSIGRecord& source);
+/// \brief Checks if the given python object is a TSIGRecord object
+///
+/// \exception PyCPPWrapperException if obj is NULL
+///
+/// \param obj The object to check the type of
+/// \return true if the object is of type TSIGRecord, false otherwise
+bool PyTSIGRecord_Check(PyObject* obj);
+
+/// \brief Returns a reference to the TSIGRecord object contained within the given
+/// Python object.
+///
+/// \note The given object MUST be of type TSIGRecord; this can be checked with
+/// either the right call to ParseTuple("O!"), or with PyTSIGRecord_Check()
+///
+/// \note This is not a copy; if the TSIGRecord is needed when the PyObject
+/// may be destroyed, the caller must copy it itself.
+///
+/// \param tsigrecord_obj The tsigrecord object to convert
+const TSIGRecord& PyTSIGRecord_ToTSIGRecord(PyObject* tsigrecord_obj);
+
} // namespace python
} // namespace dns
} // namespace isc
diff --git a/src/lib/dns/rdata/any_255/tsig_250.cc b/src/lib/dns/rdata/any_255/tsig_250.cc
index 04a4dc4..4eb72bc 100644
--- a/src/lib/dns/rdata/any_255/tsig_250.cc
+++ b/src/lib/dns/rdata/any_255/tsig_250.cc
@@ -23,6 +23,7 @@
#include <util/encode/base64.h>
#include <dns/messagerenderer.h>
+#include <dns/name.h>
#include <dns/rdata.h>
#include <dns/rdataclass.h>
#include <dns/tsigerror.h>
diff --git a/src/lib/dns/rdata/generic/afsdb_18.cc b/src/lib/dns/rdata/generic/afsdb_18.cc
new file mode 100644
index 0000000..6afc4de
--- /dev/null
+++ b/src/lib/dns/rdata/generic/afsdb_18.cc
@@ -0,0 +1,171 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <string>
+#include <sstream>
+
+#include <util/buffer.h>
+#include <util/strutil.h>
+
+#include <dns/name.h>
+#include <dns/messagerenderer.h>
+#include <dns/rdata.h>
+#include <dns/rdataclass.h>
+
+#include <boost/lexical_cast.hpp>
+
+using namespace std;
+using namespace isc::util;
+using namespace isc::util::str;
+
+// BEGIN_ISC_NAMESPACE
+// BEGIN_RDATA_NAMESPACE
+
+/// \brief Constructor from string.
+///
+/// \c afsdb_str must be formatted as follows:
+/// \code <subtype> <server name>
+/// \endcode
+/// where server name field must represent a valid domain name.
+///
+/// An example of valid string is:
+/// \code "1 server.example.com." \endcode
+///
+/// <b>Exceptions</b>
+///
+/// \exception InvalidRdataText The number of RDATA fields (must be 2) is
+/// incorrect.
+/// \exception std::bad_alloc Memory allocation fails.
+/// \exception Other The constructor of the \c Name class will throw if the
+/// names in the string is invalid.
+AFSDB::AFSDB(const std::string& afsdb_str) :
+ subtype_(0), server_(Name::ROOT_NAME())
+{
+ istringstream iss(afsdb_str);
+
+ try {
+ const uint32_t subtype = tokenToNum<int32_t, 16>(getToken(iss));
+ const Name servername(getToken(iss));
+ string server;
+
+ if (!iss.eof()) {
+ isc_throw(InvalidRdataText, "Unexpected input for AFSDB "
+ "RDATA: " << afsdb_str);
+ }
+
+ subtype_ = subtype;
+ server_ = servername;
+
+ } catch (const StringTokenError& ste) {
+ isc_throw(InvalidRdataText, "Invalid AFSDB text: " <<
+ ste.what() << ": " << afsdb_str);
+ }
+}
+
+/// \brief Constructor from wire-format data.
+///
+/// This constructor doesn't check the validity of the second parameter (rdata
+/// length) for parsing.
+/// If necessary, the caller will check consistency.
+///
+/// \exception std::bad_alloc Memory allocation fails.
+/// \exception Other The constructor of the \c Name class will throw if the
+/// names in the wire is invalid.
+AFSDB::AFSDB(InputBuffer& buffer, size_t) :
+ subtype_(buffer.readUint16()), server_(buffer)
+{}
+
+/// \brief Copy constructor.
+///
+/// \exception std::bad_alloc Memory allocation fails in copying internal
+/// member variables (this should be very rare).
+AFSDB::AFSDB(const AFSDB& other) :
+ Rdata(), subtype_(other.subtype_), server_(other.server_)
+{}
+
+AFSDB&
+AFSDB::operator=(const AFSDB& source) {
+ subtype_ = source.subtype_;
+ server_ = source.server_;
+
+ return (*this);
+}
+
+/// \brief Convert the \c AFSDB to a string.
+///
+/// The output of this method is formatted as described in the "from string"
+/// constructor (\c AFSDB(const std::string&)).
+///
+/// \exception std::bad_alloc Internal resource allocation fails.
+///
+/// \return A \c string object that represents the \c AFSDB object.
+string
+AFSDB::toText() const {
+ return (boost::lexical_cast<string>(subtype_) + " " + server_.toText());
+}
+
+/// \brief Render the \c AFSDB in the wire format without name compression.
+///
+/// \exception std::bad_alloc Internal resource allocation fails.
+///
+/// \param buffer An output buffer to store the wire data.
+void
+AFSDB::toWire(OutputBuffer& buffer) const {
+ buffer.writeUint16(subtype_);
+ server_.toWire(buffer);
+}
+
+/// \brief Render the \c AFSDB in the wire format with taking into account
+/// compression.
+///
+/// As specified in RFC3597, TYPE AFSDB is not "well-known", the server
+/// field (domain name) will not be compressed.
+///
+/// \exception std::bad_alloc Internal resource allocation fails.
+///
+/// \param renderer DNS message rendering context that encapsulates the
+/// output buffer and name compression information.
+void
+AFSDB::toWire(AbstractMessageRenderer& renderer) const {
+ renderer.writeUint16(subtype_);
+ renderer.writeName(server_, false);
+}
+
+/// \brief Compare two instances of \c AFSDB RDATA.
+///
+/// See documentation in \c Rdata.
+int
+AFSDB::compare(const Rdata& other) const {
+ const AFSDB& other_afsdb = dynamic_cast<const AFSDB&>(other);
+ if (subtype_ < other_afsdb.subtype_) {
+ return (-1);
+ } else if (subtype_ > other_afsdb.subtype_) {
+ return (1);
+ }
+
+ return (compareNames(server_, other_afsdb.server_));
+}
+
+const Name&
+AFSDB::getServer() const {
+ return (server_);
+}
+
+uint16_t
+AFSDB::getSubtype() const {
+ return (subtype_);
+}
+
+// END_RDATA_NAMESPACE
+// END_ISC_NAMESPACE
diff --git a/src/lib/dns/rdata/generic/afsdb_18.h b/src/lib/dns/rdata/generic/afsdb_18.h
new file mode 100644
index 0000000..4a46775
--- /dev/null
+++ b/src/lib/dns/rdata/generic/afsdb_18.h
@@ -0,0 +1,74 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+// BEGIN_HEADER_GUARD
+
+#include <stdint.h>
+
+#include <string>
+
+#include <dns/name.h>
+#include <dns/rdata.h>
+
+// BEGIN_ISC_NAMESPACE
+
+// BEGIN_COMMON_DECLARATIONS
+// END_COMMON_DECLARATIONS
+
+// BEGIN_RDATA_NAMESPACE
+
+/// \brief \c rdata::AFSDB class represents the AFSDB RDATA as defined %in
+/// RFC1183.
+///
+/// This class implements the basic interfaces inherited from the abstract
+/// \c rdata::Rdata class, and provides trivial accessors specific to the
+/// AFSDB RDATA.
+class AFSDB : public Rdata {
+public:
+ // BEGIN_COMMON_MEMBERS
+ // END_COMMON_MEMBERS
+
+ /// \brief Assignment operator.
+ ///
+ /// This method never throws an exception.
+ AFSDB& operator=(const AFSDB& source);
+ ///
+ /// Specialized methods
+ ///
+
+ /// \brief Return the value of the server field.
+ ///
+ /// \return A reference to a \c Name class object corresponding to the
+ /// internal server name.
+ ///
+ /// This method never throws an exception.
+ const Name& getServer() const;
+
+ /// \brief Return the value of the subtype field.
+ ///
+ /// This method never throws an exception.
+ uint16_t getSubtype() const;
+
+private:
+ uint16_t subtype_;
+ Name server_;
+};
+
+// END_RDATA_NAMESPACE
+// END_ISC_NAMESPACE
+// END_HEADER_GUARD
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/dns/rdata/generic/detail/ds_like.h b/src/lib/dns/rdata/generic/detail/ds_like.h
new file mode 100644
index 0000000..b5a35cd
--- /dev/null
+++ b/src/lib/dns/rdata/generic/detail/ds_like.h
@@ -0,0 +1,225 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __DS_LIKE_H
+#define __DS_LIKE_H 1
+
+#include <stdint.h>
+
+#include <iostream>
+#include <sstream>
+#include <string>
+#include <vector>
+
+#include <boost/lexical_cast.hpp>
+
+#include <exceptions/exceptions.h>
+
+#include <dns/messagerenderer.h>
+#include <dns/name.h>
+#include <dns/rdata.h>
+#include <dns/rdataclass.h>
+
+namespace isc {
+namespace dns {
+namespace rdata {
+namespace generic {
+namespace detail {
+
+/// \brief \c rdata::DSLikeImpl class represents the DS-like RDATA for DS
+/// and DLV types.
+///
+/// This class implements the basic interfaces inherited by the DS and DLV
+/// classes from the abstract \c rdata::Rdata class, and provides trivial
+/// accessors to DS-like RDATA.
+template <class Type, uint16_t typeCode> class DSLikeImpl {
+ // Common sequence of toWire() operations used for the two versions of
+ // toWire().
+ template <typename Output>
+ void
+ toWireCommon(Output& output) const {
+ output.writeUint16(tag_);
+ output.writeUint8(algorithm_);
+ output.writeUint8(digest_type_);
+ output.writeData(&digest_[0], digest_.size());
+ }
+
+public:
+ /// \brief Constructor from string.
+ ///
+ /// <b>Exceptions</b>
+ ///
+ /// \c InvalidRdataText is thrown if the method cannot process the
+/// parameter data for any of a number of reasons.
+ DSLikeImpl(const std::string& ds_str) {
+ std::istringstream iss(ds_str);
+ // peekc should be of iss's char_type for isspace to work
+ std::istringstream::char_type peekc;
+ std::stringbuf digestbuf;
+ uint32_t tag, algorithm, digest_type;
+
+ iss >> tag >> algorithm >> digest_type;
+ if (iss.bad() || iss.fail()) {
+ isc_throw(InvalidRdataText,
+ "Invalid " << RRType(typeCode) << " text");
+ }
+ if (tag > 0xffff) {
+ isc_throw(InvalidRdataText,
+ RRType(typeCode) << " tag out of range");
+ }
+ if (algorithm > 0xff) {
+ isc_throw(InvalidRdataText,
+ RRType(typeCode) << " algorithm out of range");
+ }
+ if (digest_type > 0xff) {
+ isc_throw(InvalidRdataText,
+ RRType(typeCode) << " digest type out of range");
+ }
+
+ iss.read(&peekc, 1);
+ if (!iss.good() || !isspace(peekc, iss.getloc())) {
+ isc_throw(InvalidRdataText,
+ RRType(typeCode) << " presentation format error");
+ }
+
+ iss >> &digestbuf;
+
+ tag_ = tag;
+ algorithm_ = algorithm;
+ digest_type_ = digest_type;
+ decodeHex(digestbuf.str(), digest_);
+ }
+
+ /// \brief Constructor from wire-format data.
+ ///
+ /// \param buffer A buffer storing the wire format data.
+ /// \param rdata_len The length of the RDATA in bytes, normally expected
+ /// to be the value of the RDLENGTH field of the corresponding RR.
+ ///
+ /// <b>Exceptions</b>
+ ///
+ /// \c InvalidRdataLength is thrown if the input data is too short for the
+ /// type.
+ DSLikeImpl(InputBuffer& buffer, size_t rdata_len) {
+ if (rdata_len < 4) {
+ isc_throw(InvalidRdataLength, RRType(typeCode) << " too short");
+ }
+
+ tag_ = buffer.readUint16();
+ algorithm_ = buffer.readUint8();
+ digest_type_ = buffer.readUint8();
+
+ rdata_len -= 4;
+ digest_.resize(rdata_len);
+ buffer.readData(&digest_[0], rdata_len);
+ }
+
+ /// \brief The copy constructor.
+ ///
+ /// Trivial for now, we could've used the default one.
+ DSLikeImpl(const DSLikeImpl& source) {
+ digest_ = source.digest_;
+ tag_ = source.tag_;
+ algorithm_ = source.algorithm_;
+ digest_type_ = source.digest_type_;
+ }
+
+ /// \brief Convert the DS-like data to a string.
+ ///
+ /// \return A \c string object that represents the DS-like data.
+ std::string
+ toText() const {
+ using namespace boost;
+ return (lexical_cast<string>(static_cast<int>(tag_)) +
+ " " + lexical_cast<string>(static_cast<int>(algorithm_)) +
+ " " + lexical_cast<string>(static_cast<int>(digest_type_)) +
+ " " + encodeHex(digest_));
+ }
+
+ /// \brief Render the DS-like data in the wire format to an OutputBuffer
+ /// object.
+ ///
+ /// \param buffer An output buffer to store the wire data.
+ void
+ toWire(OutputBuffer& buffer) const {
+ toWireCommon(buffer);
+ }
+
+ /// \brief Render the DS-like data in the wire format to an
+ /// AbstractMessageRenderer object.
+ ///
+ /// \param renderer A renderer object to send the wire data to.
+ void
+ toWire(AbstractMessageRenderer& renderer) const {
+ toWireCommon(renderer);
+ }
+
+ /// \brief Compare two instances of DS-like RDATA.
+ ///
+ /// It is up to the caller to make sure that \c other is an object of the
+ /// same \c DSLikeImpl class.
+ ///
+ /// \param other the right-hand operand to compare against.
+ /// \return < 0 if \c this would be sorted before \c other.
+ /// \return 0 if \c this is identical to \c other in terms of sorting
+ /// order.
+ /// \return > 0 if \c this would be sorted after \c other.
+ int
+ compare(const DSLikeImpl& other_ds) const {
+ if (tag_ != other_ds.tag_) {
+ return (tag_ < other_ds.tag_ ? -1 : 1);
+ }
+ if (algorithm_ != other_ds.algorithm_) {
+ return (algorithm_ < other_ds.algorithm_ ? -1 : 1);
+ }
+ if (digest_type_ != other_ds.digest_type_) {
+ return (digest_type_ < other_ds.digest_type_ ? -1 : 1);
+ }
+
+ size_t this_len = digest_.size();
+ size_t other_len = other_ds.digest_.size();
+ size_t cmplen = min(this_len, other_len);
+ int cmp = memcmp(&digest_[0], &other_ds.digest_[0], cmplen);
+ if (cmp != 0) {
+ return (cmp);
+ } else {
+ return ((this_len == other_len)
+ ? 0 : (this_len < other_len) ? -1 : 1);
+ }
+ }
+
+ /// \brief Accessors
+ uint16_t
+ getTag() const {
+ return (tag_);
+ }
+
+private:
+ // straightforward representation of DS RDATA fields
+ uint16_t tag_;
+ uint8_t algorithm_;
+ uint8_t digest_type_;
+ std::vector<uint8_t> digest_;
+};
+
+}
+}
+}
+}
+}
+#endif // __DS_LIKE_H
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/dns/rdata/generic/detail/txt_like.h b/src/lib/dns/rdata/generic/detail/txt_like.h
index 1c88f96..a0ab7ac 100644
--- a/src/lib/dns/rdata/generic/detail/txt_like.h
+++ b/src/lib/dns/rdata/generic/detail/txt_like.h
@@ -188,15 +188,18 @@ public:
// This implementation is not efficient. Revisit this (TBD).
OutputBuffer this_buffer(0);
toWire(this_buffer);
+ uint8_t const* const this_data = (uint8_t const*)this_buffer.getData();
size_t this_len = this_buffer.getLength();
OutputBuffer other_buffer(0);
other.toWire(other_buffer);
+ uint8_t const* const other_data
+ = (uint8_t const*)other_buffer.getData();
const size_t other_len = other_buffer.getLength();
const size_t cmplen = min(this_len, other_len);
- const int cmp = memcmp(this_buffer.getData(), other_buffer.getData(),
- cmplen);
+ const int cmp = memcmp(this_data, other_data, cmplen);
+
if (cmp != 0) {
return (cmp);
} else {
diff --git a/src/lib/dns/rdata/generic/dlv_32769.cc b/src/lib/dns/rdata/generic/dlv_32769.cc
new file mode 100644
index 0000000..9887aa8
--- /dev/null
+++ b/src/lib/dns/rdata/generic/dlv_32769.cc
@@ -0,0 +1,121 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <string>
+
+#include <util/buffer.h>
+#include <util/encode/hex.h>
+
+#include <dns/messagerenderer.h>
+#include <dns/rdata.h>
+#include <dns/rdataclass.h>
+
+#include <dns/rdata/generic/detail/ds_like.h>
+
+using namespace std;
+using namespace isc::util;
+using namespace isc::util::encode;
+using namespace isc::dns::rdata::generic::detail;
+
+// BEGIN_ISC_NAMESPACE
+// BEGIN_RDATA_NAMESPACE
+
+/// \brief Constructor from string.
+///
+/// A copy of the implementation object is allocated and constructed.
+DLV::DLV(const string& ds_str) :
+ impl_(new DLVImpl(ds_str))
+{}
+
+/// \brief Constructor from wire-format data.
+///
+/// A copy of the implementation object is allocated and constructed.
+DLV::DLV(InputBuffer& buffer, size_t rdata_len) :
+ impl_(new DLVImpl(buffer, rdata_len))
+{}
+
+/// \brief Copy constructor
+///
+/// A copy of the implementation object is allocated and constructed.
+DLV::DLV(const DLV& source) :
+ Rdata(), impl_(new DLVImpl(*source.impl_))
+{}
+
+/// \brief Assignment operator
+///
+/// PIMPL-induced logic
+DLV&
+DLV::operator=(const DLV& source) {
+ if (impl_ == source.impl_) {
+ return (*this);
+ }
+
+ DLVImpl* newimpl = new DLVImpl(*source.impl_);
+ delete impl_;
+ impl_ = newimpl;
+
+ return (*this);
+}
+
+/// \brief Destructor
+///
+/// Deallocates an internal resource.
+DLV::~DLV() {
+ delete impl_;
+}
+
+/// \brief Convert the \c DLV to a string.
+///
+/// A pass-thru to the corresponding implementation method.
+string
+DLV::toText() const {
+ return (impl_->toText());
+}
+
+/// \brief Render the \c DLV in the wire format to an OutputBuffer object
+///
+/// A pass-thru to the corresponding implementation method.
+void
+DLV::toWire(OutputBuffer& buffer) const {
+ impl_->toWire(buffer);
+}
+
+/// \brief Render the \c DLV in the wire format to an AbstractMessageRenderer
+/// object
+///
+/// A pass-thru to the corresponding implementation method.
+void
+DLV::toWire(AbstractMessageRenderer& renderer) const {
+ impl_->toWire(renderer);
+}
+
+/// \brief Compare two instances of \c DLV RDATA.
+///
+/// The type check is performed here. Otherwise, a pass-thru to the
+/// corresponding implementation method.
+int
+DLV::compare(const Rdata& other) const {
+ const DLV& other_ds = dynamic_cast<const DLV&>(other);
+
+ return (impl_->compare(*other_ds.impl_));
+}
+
+/// \brief Tag accessor
+uint16_t
+DLV::getTag() const {
+ return (impl_->getTag());
+}
+
+// END_RDATA_NAMESPACE
+// END_ISC_NAMESPACE
diff --git a/src/lib/dns/rdata/generic/dlv_32769.h b/src/lib/dns/rdata/generic/dlv_32769.h
new file mode 100644
index 0000000..86cd98c
--- /dev/null
+++ b/src/lib/dns/rdata/generic/dlv_32769.h
@@ -0,0 +1,77 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+// BEGIN_HEADER_GUARD
+
+#include <stdint.h>
+
+#include <string>
+
+#include <dns/name.h>
+#include <dns/rrtype.h>
+#include <dns/rrttl.h>
+#include <dns/rdata.h>
+
+// BEGIN_ISC_NAMESPACE
+
+// BEGIN_COMMON_DECLARATIONS
+// END_COMMON_DECLARATIONS
+
+// BEGIN_RDATA_NAMESPACE
+
+namespace detail {
+template <class Type, uint16_t typeCode> class DSLikeImpl;
+}
+
+/// \brief \c rdata::generic::DLV class represents the DLV RDATA as defined in
+/// RFC4431.
+///
+/// This class implements the basic interfaces inherited from the abstract
+/// \c rdata::Rdata class, and provides trivial accessors specific to the
+/// DLV RDATA.
+class DLV : public Rdata {
+public:
+ // BEGIN_COMMON_MEMBERS
+ // END_COMMON_MEMBERS
+
+ /// \brief Assignment operator.
+ ///
+ /// It internally allocates a resource, and if it fails a corresponding
+ /// standard exception will be thrown.
+ /// This operator never throws an exception otherwise.
+ ///
+ /// This operator provides the strong exception guarantee: When an
+ /// exception is thrown the content of the assignment target will be
+ /// intact.
+ DLV& operator=(const DLV& source);
+
+ /// \brief The destructor.
+ ~DLV();
+
+ /// \brief Return the value of the Tag field.
+ ///
+ /// This method never throws an exception.
+ uint16_t getTag() const;
+private:
+ typedef detail::DSLikeImpl<DLV, 32769> DLVImpl;
+ DLVImpl* impl_;
+};
+
+// END_RDATA_NAMESPACE
+// END_ISC_NAMESPACE
+// END_HEADER_GUARD
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/dns/rdata/generic/ds_43.cc b/src/lib/dns/rdata/generic/ds_43.cc
index 1b48456..20b62dc 100644
--- a/src/lib/dns/rdata/generic/ds_43.cc
+++ b/src/lib/dns/rdata/generic/ds_43.cc
@@ -1,4 +1,4 @@
-// Copyright (C) 2010 Internet Systems Consortium, Inc. ("ISC")
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
//
// Permission to use, copy, modify, and/or distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
@@ -12,87 +12,32 @@
// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
// PERFORMANCE OF THIS SOFTWARE.
-#include <iostream>
#include <string>
-#include <sstream>
-#include <vector>
-
-#include <boost/lexical_cast.hpp>
#include <util/buffer.h>
#include <util/encode/hex.h>
#include <dns/messagerenderer.h>
-#include <dns/name.h>
#include <dns/rdata.h>
#include <dns/rdataclass.h>
-#include <stdio.h>
-#include <time.h>
+#include <dns/rdata/generic/detail/ds_like.h>
using namespace std;
using namespace isc::util;
using namespace isc::util::encode;
+using namespace isc::dns::rdata::generic::detail;
// BEGIN_ISC_NAMESPACE
// BEGIN_RDATA_NAMESPACE
-struct DSImpl {
- // straightforward representation of DS RDATA fields
- DSImpl(uint16_t tag, uint8_t algorithm, uint8_t digest_type,
- const vector<uint8_t>& digest) :
- tag_(tag), algorithm_(algorithm), digest_type_(digest_type),
- digest_(digest)
- {}
-
- uint16_t tag_;
- uint8_t algorithm_;
- uint8_t digest_type_;
- const vector<uint8_t> digest_;
-};
-
DS::DS(const string& ds_str) :
- impl_(NULL)
-{
- istringstream iss(ds_str);
- unsigned int tag, algorithm, digest_type;
- stringbuf digestbuf;
-
- iss >> tag >> algorithm >> digest_type >> &digestbuf;
- if (iss.bad() || iss.fail()) {
- isc_throw(InvalidRdataText, "Invalid DS text");
- }
- if (tag > 0xffff) {
- isc_throw(InvalidRdataText, "DS tag out of range");
- }
- if (algorithm > 0xff) {
- isc_throw(InvalidRdataText, "DS algorithm out of range");
- }
- if (digest_type > 0xff) {
- isc_throw(InvalidRdataText, "DS digest type out of range");
- }
-
- vector<uint8_t> digest;
- decodeHex(digestbuf.str(), digest);
-
- impl_ = new DSImpl(tag, algorithm, digest_type, digest);
-}
-
-DS::DS(InputBuffer& buffer, size_t rdata_len) {
- if (rdata_len < 4) {
- isc_throw(InvalidRdataLength, "DS too short");
- }
-
- uint16_t tag = buffer.readUint16();
- uint16_t algorithm = buffer.readUint8();
- uint16_t digest_type = buffer.readUint8();
-
- rdata_len -= 4;
- vector<uint8_t> digest(rdata_len);
- buffer.readData(&digest[0], rdata_len);
+ impl_(new DSImpl(ds_str))
+{}
- impl_ = new DSImpl(tag, algorithm, digest_type, digest);
-}
+DS::DS(InputBuffer& buffer, size_t rdata_len) :
+ impl_(new DSImpl(buffer, rdata_len))
+{}
DS::DS(const DS& source) :
Rdata(), impl_(new DSImpl(*source.impl_))
@@ -117,57 +62,29 @@ DS::~DS() {
string
DS::toText() const {
- using namespace boost;
- return (lexical_cast<string>(static_cast<int>(impl_->tag_)) +
- " " + lexical_cast<string>(static_cast<int>(impl_->algorithm_)) +
- " " + lexical_cast<string>(static_cast<int>(impl_->digest_type_)) +
- " " + encodeHex(impl_->digest_));
+ return (impl_->toText());
}
void
DS::toWire(OutputBuffer& buffer) const {
- buffer.writeUint16(impl_->tag_);
- buffer.writeUint8(impl_->algorithm_);
- buffer.writeUint8(impl_->digest_type_);
- buffer.writeData(&impl_->digest_[0], impl_->digest_.size());
+ impl_->toWire(buffer);
}
void
DS::toWire(AbstractMessageRenderer& renderer) const {
- renderer.writeUint16(impl_->tag_);
- renderer.writeUint8(impl_->algorithm_);
- renderer.writeUint8(impl_->digest_type_);
- renderer.writeData(&impl_->digest_[0], impl_->digest_.size());
+ impl_->toWire(renderer);
}
int
DS::compare(const Rdata& other) const {
const DS& other_ds = dynamic_cast<const DS&>(other);
- if (impl_->tag_ != other_ds.impl_->tag_) {
- return (impl_->tag_ < other_ds.impl_->tag_ ? -1 : 1);
- }
- if (impl_->algorithm_ != other_ds.impl_->algorithm_) {
- return (impl_->algorithm_ < other_ds.impl_->algorithm_ ? -1 : 1);
- }
- if (impl_->digest_type_ != other_ds.impl_->digest_type_) {
- return (impl_->digest_type_ < other_ds.impl_->digest_type_ ? -1 : 1);
- }
-
- size_t this_len = impl_->digest_.size();
- size_t other_len = other_ds.impl_->digest_.size();
- size_t cmplen = min(this_len, other_len);
- int cmp = memcmp(&impl_->digest_[0], &other_ds.impl_->digest_[0], cmplen);
- if (cmp != 0) {
- return (cmp);
- } else {
- return ((this_len == other_len) ? 0 : (this_len < other_len) ? -1 : 1);
- }
+ return (impl_->compare(*other_ds.impl_));
}
uint16_t
DS::getTag() const {
- return (impl_->tag_);
+ return (impl_->getTag());
}
// END_RDATA_NAMESPACE
diff --git a/src/lib/dns/rdata/generic/ds_43.h b/src/lib/dns/rdata/generic/ds_43.h
index 03b19a0..2697f51 100644
--- a/src/lib/dns/rdata/generic/ds_43.h
+++ b/src/lib/dns/rdata/generic/ds_43.h
@@ -1,4 +1,4 @@
-// Copyright (C) 2010 Internet Systems Consortium, Inc. ("ISC")
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
//
// Permission to use, copy, modify, and/or distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
@@ -12,6 +12,8 @@
// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
// PERFORMANCE OF THIS SOFTWARE.
+// BEGIN_HEADER_GUARD
+
#include <stdint.h>
#include <string>
@@ -21,8 +23,6 @@
#include <dns/rrttl.h>
#include <dns/rdata.h>
-// BEGIN_HEADER_GUARD
-
// BEGIN_ISC_NAMESPACE
// BEGIN_COMMON_DECLARATIONS
@@ -30,20 +30,41 @@
// BEGIN_RDATA_NAMESPACE
-struct DSImpl;
+namespace detail {
+template <class Type, uint16_t typeCode> class DSLikeImpl;
+}
+/// \brief \c rdata::generic::DS class represents the DS RDATA as defined in
+/// RFC3658.
+///
+/// This class implements the basic interfaces inherited from the abstract
+/// \c rdata::Rdata class, and provides trivial accessors specific to the
+/// DS RDATA.
class DS : public Rdata {
public:
// BEGIN_COMMON_MEMBERS
// END_COMMON_MEMBERS
+
+ /// \brief Assignment operator.
+ ///
+ /// It internally allocates a resource, and if it fails a corresponding
+ /// standard exception will be thrown.
+ /// This operator never throws an exception otherwise.
+ ///
+ /// This operator provides the strong exception guarantee: When an
+ /// exception is thrown the content of the assignment target will be
+ /// intact.
DS& operator=(const DS& source);
+
+ /// \brief The destructor.
~DS();
+ /// \brief Return the value of the Tag field.
///
- /// Specialized methods
- ///
+ /// This method never throws an exception.
uint16_t getTag() const;
private:
+ typedef detail::DSLikeImpl<DS, 43> DSImpl;
DSImpl* impl_;
};
diff --git a/src/lib/dns/rdata/generic/hinfo_13.cc b/src/lib/dns/rdata/generic/hinfo_13.cc
new file mode 100644
index 0000000..45f4209
--- /dev/null
+++ b/src/lib/dns/rdata/generic/hinfo_13.cc
@@ -0,0 +1,129 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <config.h>
+
+#include <string>
+
+#include <boost/lexical_cast.hpp>
+
+#include <exceptions/exceptions.h>
+
+#include <dns/name.h>
+#include <dns/messagerenderer.h>
+#include <dns/rdata.h>
+#include <dns/rdataclass.h>
+#include <dns/character_string.h>
+#include <util/strutil.h>
+
+using namespace std;
+using namespace boost;
+using namespace isc::util;
+using namespace isc::dns;
+using namespace isc::dns::characterstr;
+
+// BEGIN_ISC_NAMESPACE
+// BEGIN_RDATA_NAMESPACE
+
+
+HINFO::HINFO(const string& hinfo_str) {
+ string::const_iterator input_iterator = hinfo_str.begin();
+ cpu_ = getNextCharacterString(hinfo_str, input_iterator);
+
+ skipLeftSpaces(hinfo_str, input_iterator);
+
+ os_ = getNextCharacterString(hinfo_str, input_iterator);
+}
+
+HINFO::HINFO(InputBuffer& buffer, size_t rdata_len) {
+ cpu_ = getNextCharacterString(buffer, rdata_len);
+ os_ = getNextCharacterString(buffer, rdata_len);
+}
+
+HINFO::HINFO(const HINFO& source):
+ Rdata(), cpu_(source.cpu_), os_(source.os_)
+{
+}
+
+std::string
+HINFO::toText() const {
+ string result;
+ result += "\"";
+ result += cpu_;
+ result += "\" \"";
+ result += os_;
+ result += "\"";
+ return (result);
+}
+
+void
+HINFO::toWire(OutputBuffer& buffer) const {
+ toWireHelper(buffer);
+}
+
+void
+HINFO::toWire(AbstractMessageRenderer& renderer) const {
+ toWireHelper(renderer);
+}
+
+int
+HINFO::compare(const Rdata& other) const {
+ const HINFO& other_hinfo = dynamic_cast<const HINFO&>(other);
+
+ if (cpu_ < other_hinfo.cpu_) {
+ return (-1);
+ } else if (cpu_ > other_hinfo.cpu_) {
+ return (1);
+ }
+
+ if (os_ < other_hinfo.os_) {
+ return (-1);
+ } else if (os_ > other_hinfo.os_) {
+ return (1);
+ }
+
+ return (0);
+}
+
+const std::string&
+HINFO::getCPU() const {
+ return (cpu_);
+}
+
+const std::string&
+HINFO::getOS() const {
+ return (os_);
+}
+
+void
+HINFO::skipLeftSpaces(const std::string& input_str,
+ std::string::const_iterator& input_iterator)
+{
+ if (input_iterator >= input_str.end()) {
+ isc_throw(InvalidRdataText,
+ "Invalid HINFO text format, field is missing.");
+ }
+
+ if (!isspace(*input_iterator)) {
+ isc_throw(InvalidRdataText,
+ "Invalid HINFO text format, fields are not separated by space.");
+ }
+ // Skip white spaces
+ while (input_iterator < input_str.end() && isspace(*input_iterator)) {
+ ++input_iterator;
+ }
+}
+
+// END_RDATA_NAMESPACE
+// END_ISC_NAMESPACE
diff --git a/src/lib/dns/rdata/generic/hinfo_13.h b/src/lib/dns/rdata/generic/hinfo_13.h
new file mode 100644
index 0000000..8513419
--- /dev/null
+++ b/src/lib/dns/rdata/generic/hinfo_13.h
@@ -0,0 +1,77 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+// BEGIN_HEADER_GUARD
+#include <stdint.h>
+
+#include <string>
+
+#include <dns/name.h>
+#include <dns/rdata.h>
+#include <util/buffer.h>
+
+// BEGIN_ISC_NAMESPACE
+
+// BEGIN_COMMON_DECLARATIONS
+// END_COMMON_DECLARATIONS
+
+// BEGIN_RDATA_NAMESPACE
+
+/// \brief \c HINFO class represents the HINFO rdata defined in
+/// RFC1034, RFC1035
+///
+/// This class implements the basic interfaces inherited from the
+/// \c rdata::Rdata class, and provides accessors specific to the
+/// HINFO rdata.
+class HINFO : public Rdata {
+public:
+ // BEGIN_COMMON_MEMBERS
+ // END_COMMON_MEMBERS
+
+ // HINFO specific methods
+ const std::string& getCPU() const;
+ const std::string& getOS() const;
+
+private:
+ /// Skip the left whitespaces of the input string
+ ///
+ /// \param input_str The input string
+ /// \param input_iterator From which the skipping started
+ void skipLeftSpaces(const std::string& input_str,
+ std::string::const_iterator& input_iterator);
+
+ /// Helper template function for toWire()
+ ///
+ /// \param outputer Where to write data in
+ template <typename T>
+ void toWireHelper(T& outputer) const {
+ outputer.writeUint8(cpu_.size());
+ outputer.writeData(cpu_.c_str(), cpu_.size());
+
+ outputer.writeUint8(os_.size());
+ outputer.writeData(os_.c_str(), os_.size());
+ }
+
+ std::string cpu_;
+ std::string os_;
+};
+
+
+// END_RDATA_NAMESPACE
+// END_ISC_NAMESPACE
+// END_HEADER_GUARD
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/dns/rdata/generic/minfo_14.cc b/src/lib/dns/rdata/generic/minfo_14.cc
new file mode 100644
index 0000000..aa5272c
--- /dev/null
+++ b/src/lib/dns/rdata/generic/minfo_14.cc
@@ -0,0 +1,156 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <string>
+#include <sstream>
+
+#include <util/buffer.h>
+
+#include <dns/messagerenderer.h>
+#include <dns/name.h>
+#include <dns/rdata.h>
+#include <dns/rdataclass.h>
+
+using namespace std;
+using namespace isc::dns;
+using namespace isc::util;
+
+// BEGIN_ISC_NAMESPACE
+// BEGIN_RDATA_NAMESPACE
+
+/// \brief Constructor from string.
+///
+/// \c minfo_str must be formatted as follows:
+/// \code <rmailbox name> <emailbox name>
+/// \endcode
+/// where both fields must represent a valid domain name.
+///
+/// An example of valid string is:
+/// \code "rmail.example.com. email.example.com." \endcode
+///
+/// <b>Exceptions</b>
+///
+/// \exception InvalidRdataText The number of RDATA fields (must be 2) is
+/// incorrect.
+/// \exception std::bad_alloc Memory allocation for names fails.
+/// \exception Other The constructor of the \c Name class will throw if the
+/// names in the string is invalid.
+MINFO::MINFO(const std::string& minfo_str) :
+ // We cannot construct both names in the initialization list due to the
+ // necessary text processing, so we have to initialize them with a dummy
+ // name and replace them later.
+ rmailbox_(Name::ROOT_NAME()), emailbox_(Name::ROOT_NAME())
+{
+ istringstream iss(minfo_str);
+ string rmailbox_str, emailbox_str;
+ iss >> rmailbox_str >> emailbox_str;
+
+ // Validation: A valid MINFO RR must have exactly two fields.
+ if (iss.bad() || iss.fail()) {
+ isc_throw(InvalidRdataText, "Invalid MINFO text: " << minfo_str);
+ }
+ if (!iss.eof()) {
+ isc_throw(InvalidRdataText, "Invalid MINFO text (redundant field): "
+ << minfo_str);
+ }
+
+ rmailbox_ = Name(rmailbox_str);
+ emailbox_ = Name(emailbox_str);
+}
+
+/// \brief Constructor from wire-format data.
+///
+/// This constructor doesn't check the validity of the second parameter (rdata
+/// length) for parsing.
+/// If necessary, the caller will check consistency.
+///
+/// \exception std::bad_alloc Memory allocation for names fails.
+/// \exception Other The constructor of the \c Name class will throw if the
+/// names in the wire is invalid.
+MINFO::MINFO(InputBuffer& buffer, size_t) :
+ rmailbox_(buffer), emailbox_(buffer)
+{}
+
+/// \brief Copy constructor.
+///
+/// \exception std::bad_alloc Memory allocation fails in copying internal
+/// member variables (this should be very rare).
+MINFO::MINFO(const MINFO& other) :
+ Rdata(), rmailbox_(other.rmailbox_), emailbox_(other.emailbox_)
+{}
+
+/// \brief Convert the \c MINFO to a string.
+///
+/// The output of this method is formatted as described in the "from string"
+/// constructor (\c MINFO(const std::string&)).
+///
+/// \exception std::bad_alloc Internal resource allocation fails.
+///
+/// \return A \c string object that represents the \c MINFO object.
+std::string
+MINFO::toText() const {
+ return (rmailbox_.toText() + " " + emailbox_.toText());
+}
+
+/// \brief Render the \c MINFO in the wire format without name compression.
+///
+/// \exception std::bad_alloc Internal resource allocation fails.
+///
+/// \param buffer An output buffer to store the wire data.
+void
+MINFO::toWire(OutputBuffer& buffer) const {
+ rmailbox_.toWire(buffer);
+ emailbox_.toWire(buffer);
+}
+
+MINFO&
+MINFO::operator=(const MINFO& source) {
+ rmailbox_ = source.rmailbox_;
+ emailbox_ = source.emailbox_;
+
+ return (*this);
+}
+
+/// \brief Render the \c MINFO in the wire format with taking into account
+/// compression.
+///
+/// As specified in RFC3597, TYPE MINFO is "well-known", the rmailbox and
+/// emailbox fields (domain names) will be compressed.
+///
+/// \exception std::bad_alloc Internal resource allocation fails.
+///
+/// \param renderer DNS message rendering context that encapsulates the
+/// output buffer and name compression information.
+void
+MINFO::toWire(AbstractMessageRenderer& renderer) const {
+ renderer.writeName(rmailbox_);
+ renderer.writeName(emailbox_);
+}
+
+/// \brief Compare two instances of \c MINFO RDATA.
+///
+/// See documentation in \c Rdata.
+int
+MINFO::compare(const Rdata& other) const {
+ const MINFO& other_minfo = dynamic_cast<const MINFO&>(other);
+
+ const int cmp = compareNames(rmailbox_, other_minfo.rmailbox_);
+ if (cmp != 0) {
+ return (cmp);
+ }
+ return (compareNames(emailbox_, other_minfo.emailbox_));
+}
+
+// END_RDATA_NAMESPACE
+// END_ISC_NAMESPACE
diff --git a/src/lib/dns/rdata/generic/minfo_14.h b/src/lib/dns/rdata/generic/minfo_14.h
new file mode 100644
index 0000000..f3ee1d0
--- /dev/null
+++ b/src/lib/dns/rdata/generic/minfo_14.h
@@ -0,0 +1,82 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+// BEGIN_HEADER_GUARD
+
+#include <string>
+
+#include <dns/name.h>
+#include <dns/rdata.h>
+
+// BEGIN_ISC_NAMESPACE
+
+// BEGIN_COMMON_DECLARATIONS
+// END_COMMON_DECLARATIONS
+
+// BEGIN_RDATA_NAMESPACE
+
+/// \brief \c rdata::generic::MINFO class represents the MINFO RDATA as
+/// defined in RFC1035.
+///
+/// This class implements the basic interfaces inherited from the abstract
+/// \c rdata::Rdata class, and provides trivial accessors specific to the
+/// MINFO RDATA.
+class MINFO : public Rdata {
+public:
+ // BEGIN_COMMON_MEMBERS
+ // END_COMMON_MEMBERS
+
+ /// \brief Define the assignment operator.
+ ///
+ /// \exception std::bad_alloc Memory allocation fails in copying
+ /// internal member variables (this should be very rare).
+ MINFO& operator=(const MINFO& source);
+
+ /// \brief Return the value of the rmailbox field.
+ ///
+ /// \exception std::bad_alloc If resource allocation for the returned
+ /// \c Name fails.
+ ///
+ /// \note
+ /// Unlike the case of some other RDATA classes (such as
+ /// \c NS::getNSName()), this method constructs a new \c Name object
+ /// and returns it, instead of returning a reference to a \c Name object
+ /// internally maintained in the class (which is a private member).
+ /// This is based on the observation that this method will be rarely
+ /// used and even when it's used it will not be in a performance context
+ /// (for example, a recursive resolver won't need this field in its
+ /// resolution process). By returning a new object we have flexibility
+ /// of changing the internal representation without the risk of changing
+ /// the interface or method property.
+ /// The same note applies to the \c getEmailbox() method.
+ Name getRmailbox() const { return (rmailbox_); }
+
+ /// \brief Return the value of the emailbox field.
+ ///
+ /// \exception std::bad_alloc If resource allocation for the returned
+ /// \c Name fails.
+ Name getEmailbox() const { return (emailbox_); }
+
+private:
+ Name rmailbox_;
+ Name emailbox_;
+};
+
+// END_RDATA_NAMESPACE
+// END_ISC_NAMESPACE
+// END_HEADER_GUARD
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/dns/rdata/generic/naptr_35.cc b/src/lib/dns/rdata/generic/naptr_35.cc
new file mode 100644
index 0000000..129bf6c
--- /dev/null
+++ b/src/lib/dns/rdata/generic/naptr_35.cc
@@ -0,0 +1,220 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <config.h>
+
+#include <string>
+
+#include <boost/lexical_cast.hpp>
+
+#include <exceptions/exceptions.h>
+
+#include <dns/character_string.h>
+#include <dns/name.h>
+#include <dns/messagerenderer.h>
+#include <dns/rdata.h>
+#include <dns/rdataclass.h>
+
+using namespace std;
+using namespace boost;
+using namespace isc::util;
+using namespace isc::dns;
+using namespace isc::dns::characterstr;
+
+// BEGIN_ISC_NAMESPACE
+// BEGIN_RDATA_NAMESPACE
+
+namespace {
+/// Skip the left whitespaces of the input string
+///
+/// \param input_str The input string
+/// \param input_iterator From which the skipping started
+void
+skipLeftSpaces(const std::string& input_str,
+ std::string::const_iterator& input_iterator)
+{
+ if (input_iterator >= input_str.end()) {
+ isc_throw(InvalidRdataText,
+ "Invalid NAPTR text format, field is missing.");
+ }
+
+ if (!isspace(*input_iterator)) {
+ isc_throw(InvalidRdataText,
+ "Invalid NAPTR text format, fields are not separated by space.");
+ }
+ // Skip white spaces
+ while (input_iterator < input_str.end() && isspace(*input_iterator)) {
+ ++input_iterator;
+ }
+}
+
+} // Anonymous namespace
+
+NAPTR::NAPTR(InputBuffer& buffer, size_t len):
+ replacement_(".")
+{
+ order_ = buffer.readUint16();
+ preference_ = buffer.readUint16();
+
+ flags_ = getNextCharacterString(buffer, len);
+ services_ = getNextCharacterString(buffer, len);
+ regexp_ = getNextCharacterString(buffer, len);
+ replacement_ = Name(buffer);
+}
+
+NAPTR::NAPTR(const std::string& naptr_str):
+ replacement_(".")
+{
+ istringstream iss(naptr_str);
+ uint16_t order;
+ uint16_t preference;
+
+ iss >> order >> preference;
+
+ if (iss.bad() || iss.fail()) {
+ isc_throw(InvalidRdataText, "Invalid NAPTR text format");
+ }
+
+ order_ = order;
+ preference_ = preference;
+
+ string::const_iterator input_iterator = naptr_str.begin() + iss.tellg();
+
+ skipLeftSpaces(naptr_str, input_iterator);
+
+ flags_ = getNextCharacterString(naptr_str, input_iterator);
+
+ skipLeftSpaces(naptr_str, input_iterator);
+
+ services_ = getNextCharacterString(naptr_str, input_iterator);
+
+ skipLeftSpaces(naptr_str, input_iterator);
+
+ regexp_ = getNextCharacterString(naptr_str, input_iterator);
+
+ skipLeftSpaces(naptr_str, input_iterator);
+
+ if (input_iterator < naptr_str.end()) {
+ string replacementStr(input_iterator, naptr_str.end());
+
+ replacement_ = Name(replacementStr);
+ } else {
+ isc_throw(InvalidRdataText,
+ "Invalid NAPTR text format, replacement field is missing");
+ }
+}
+
+NAPTR::NAPTR(const NAPTR& naptr):
+ Rdata(), order_(naptr.order_), preference_(naptr.preference_),
+ flags_(naptr.flags_), services_(naptr.services_), regexp_(naptr.regexp_),
+ replacement_(naptr.replacement_)
+{
+}
+
+void
+NAPTR::toWire(OutputBuffer& buffer) const {
+ toWireHelper(buffer);
+}
+
+void
+NAPTR::toWire(AbstractMessageRenderer& renderer) const {
+ toWireHelper(renderer);
+}
+
+string
+NAPTR::toText() const {
+ string result;
+ result += lexical_cast<string>(order_);
+ result += " ";
+ result += lexical_cast<string>(preference_);
+ result += " \"";
+ result += flags_;
+ result += "\" \"";
+ result += services_;
+ result += "\" \"";
+ result += regexp_;
+ result += "\" ";
+ result += replacement_.toText();
+ return (result);
+}
+
+int
+NAPTR::compare(const Rdata& other) const {
+ const NAPTR other_naptr = dynamic_cast<const NAPTR&>(other);
+
+ if (order_ < other_naptr.order_) {
+ return (-1);
+ } else if (order_ > other_naptr.order_) {
+ return (1);
+ }
+
+ if (preference_ < other_naptr.preference_) {
+ return (-1);
+ } else if (preference_ > other_naptr.preference_) {
+ return (1);
+ }
+
+ if (flags_ < other_naptr.flags_) {
+ return (-1);
+ } else if (flags_ > other_naptr.flags_) {
+ return (1);
+ }
+
+ if (services_ < other_naptr.services_) {
+ return (-1);
+ } else if (services_ > other_naptr.services_) {
+ return (1);
+ }
+
+ if (regexp_ < other_naptr.regexp_) {
+ return (-1);
+ } else if (regexp_ > other_naptr.regexp_) {
+ return (1);
+ }
+
+ return (compareNames(replacement_, other_naptr.replacement_));
+}
+
+uint16_t
+NAPTR::getOrder() const {
+ return (order_);
+}
+
+uint16_t
+NAPTR::getPreference() const {
+ return (preference_);
+}
+
+const std::string&
+NAPTR::getFlags() const {
+ return (flags_);
+}
+
+const std::string&
+NAPTR::getServices() const {
+ return (services_);
+}
+
+const std::string&
+NAPTR::getRegexp() const {
+ return (regexp_);
+}
+
+const Name&
+NAPTR::getReplacement() const {
+ return (replacement_);
+}
+
+// END_RDATA_NAMESPACE
+// END_ISC_NAMESPACE
diff --git a/src/lib/dns/rdata/generic/naptr_35.h b/src/lib/dns/rdata/generic/naptr_35.h
new file mode 100644
index 0000000..ca16b3c
--- /dev/null
+++ b/src/lib/dns/rdata/generic/naptr_35.h
@@ -0,0 +1,83 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+// BEGIN_HEADER_GUARD
+
+#include <string>
+
+#include <dns/name.h>
+#include <dns/rdata.h>
+#include <util/buffer.h>
+
+// BEGIN_ISC_NAMESPACE
+
+// BEGIN_COMMON_DECLARATIONS
+// END_COMMON_DECLARATIONS
+
+// BEGIN_RDATA_NAMESPACE
+
+/// \brief \c NAPTR class represents the NAPTR rdata defined in
+/// RFC2915, RFC2168 and RFC3403
+///
+/// This class implements the basic interfaces inherited from the
+/// \c rdata::Rdata class, and provides accessors specific to the
+/// NAPTR rdata.
+class NAPTR : public Rdata {
+public:
+ // BEGIN_COMMON_MEMBERS
+ // END_COMMON_MEMBERS
+
+ // NAPTR specific methods
+ uint16_t getOrder() const;
+ uint16_t getPreference() const;
+ const std::string& getFlags() const;
+ const std::string& getServices() const;
+ const std::string& getRegexp() const;
+ const Name& getReplacement() const;
+private:
+ /// Helper template function for toWire()
+ ///
+ /// \param outputer Where to write data in
+ template <typename T>
+ void toWireHelper(T& outputer) const {
+ outputer.writeUint16(order_);
+ outputer.writeUint16(preference_);
+
+ outputer.writeUint8(flags_.size());
+ outputer.writeData(flags_.c_str(), flags_.size());
+
+ outputer.writeUint8(services_.size());
+ outputer.writeData(services_.c_str(), services_.size());
+
+ outputer.writeUint8(regexp_.size());
+ outputer.writeData(regexp_.c_str(), regexp_.size());
+
+ replacement_.toWire(outputer);
+ }
+
+ uint16_t order_;
+ uint16_t preference_;
+ std::string flags_;
+ std::string services_;
+ std::string regexp_;
+ Name replacement_;
+};
+
+// END_RDATA_NAMESPACE
+// END_ISC_NAMESPACE
+// END_HEADER_GUARD
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/dns/rdata/generic/rp_17.cc b/src/lib/dns/rdata/generic/rp_17.cc
index b8b2ba2..781b55d 100644
--- a/src/lib/dns/rdata/generic/rp_17.cc
+++ b/src/lib/dns/rdata/generic/rp_17.cc
@@ -24,6 +24,7 @@
using namespace std;
using namespace isc::dns;
+using namespace isc::util;
// BEGIN_ISC_NAMESPACE
// BEGIN_RDATA_NAMESPACE
diff --git a/src/lib/dns/rdata/generic/rrsig_46.cc b/src/lib/dns/rdata/generic/rrsig_46.cc
index fc8e340..59ff030 100644
--- a/src/lib/dns/rdata/generic/rrsig_46.cc
+++ b/src/lib/dns/rdata/generic/rrsig_46.cc
@@ -244,7 +244,7 @@ RRSIG::compare(const Rdata& other) const {
}
const RRType&
-RRSIG::typeCovered() {
+RRSIG::typeCovered() const {
return (impl_->covered_);
}
diff --git a/src/lib/dns/rdata/generic/rrsig_46.h b/src/lib/dns/rdata/generic/rrsig_46.h
index b8e6306..b32c17f 100644
--- a/src/lib/dns/rdata/generic/rrsig_46.h
+++ b/src/lib/dns/rdata/generic/rrsig_46.h
@@ -40,7 +40,7 @@ public:
~RRSIG();
// specialized methods
- const RRType& typeCovered();
+ const RRType& typeCovered() const;
private:
RRSIGImpl* impl_;
};
diff --git a/src/lib/dns/rdata/in_1/dhcid_49.cc b/src/lib/dns/rdata/in_1/dhcid_49.cc
new file mode 100644
index 0000000..0a9a23c
--- /dev/null
+++ b/src/lib/dns/rdata/in_1/dhcid_49.cc
@@ -0,0 +1,145 @@
+// Copyright (C) 2010 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <stdint.h>
+#include <string.h>
+
+#include <string>
+
+#include <exceptions/exceptions.h>
+
+#include <util/buffer.h>
+#include <util/encode/hex.h>
+#include <dns/exceptions.h>
+#include <dns/messagerenderer.h>
+#include <dns/rdata.h>
+#include <dns/rdataclass.h>
+
+using namespace std;
+using namespace isc::util;
+
+// BEGIN_ISC_NAMESPACE
+// BEGIN_RDATA_NAMESPACE
+
+/// \brief Constructor from string.
+///
+/// \param dhcid_str A base-64 representation of the DHCID binary data.
+/// The data is considered to be opaque, but a sanity check is performed.
+///
+/// <b>Exceptions</b>
+///
+/// \c dhcid_str must be a valid BASE-64 string, otherwise an exception
+/// of class \c isc::BadValue will be thrown;
+/// the binary data should consist of at least 3 octets as per RFC4701:
+/// < 2 octets > Identifier type code
+/// < 1 octet > Digest type code
+/// < n octets > Digest (length depends on digest type)
+/// If the data is less than 3 octets (i.e. it cannot contain id type code and
+/// digest type code), an exception of class \c InvalidRdataLength is thrown.
+DHCID::DHCID(const string& dhcid_str) {
+ istringstream iss(dhcid_str);
+ stringbuf digestbuf;
+
+ iss >> &digestbuf;
+ isc::util::encode::decodeHex(digestbuf.str(), digest_);
+
+ // RFC4701 states DNS software should consider the RDATA section to
+ // be opaque, but there must be at least three bytes in the data:
+ // < 2 octets > Identifier type code
+ // < 1 octet > Digest type code
+ if (digest_.size() < 3) {
+ isc_throw(InvalidRdataLength, "DHCID length " << digest_.size() <<
+ " too short, need at least 3 bytes");
+ }
+}
+
+/// \brief Constructor from wire-format data.
+///
+/// \param buffer A buffer storing the wire format data.
+/// \param rdata_len The length of the RDATA in bytes
+///
+/// <b>Exceptions</b>
+/// \c InvalidRdataLength is thrown if \c rdata_len is less than the minimum of 3 octets
+DHCID::DHCID(InputBuffer& buffer, size_t rdata_len) {
+ if (rdata_len < 3) {
+ isc_throw(InvalidRdataLength, "DHCID length " << rdata_len <<
+ " too short, need at least 3 bytes");
+ }
+
+ digest_.resize(rdata_len);
+ buffer.readData(&digest_[0], rdata_len);
+}
+
+/// \brief The copy constructor.
+///
+/// This trivial copy constructor never throws an exception.
+DHCID::DHCID(const DHCID& other) : Rdata(), digest_(other.digest_)
+{}
+
+/// \brief Render the \c DHCID in the wire format.
+///
+/// \param buffer An output buffer to store the wire data.
+void
+DHCID::toWire(OutputBuffer& buffer) const {
+ buffer.writeData(&digest_[0], digest_.size());
+}
+
+/// \brief Render the \c DHCID in the wire format into a
+/// \c MessageRenderer object.
+///
+/// \param renderer DNS message rendering context that encapsulates the
+/// output buffer in which the \c DHCID is to be stored.
+void
+DHCID::toWire(AbstractMessageRenderer& renderer) const {
+ renderer.writeData(&digest_[0], digest_.size());
+}
+
+/// \brief Convert the \c DHCID to a string.
+///
+/// This method returns a \c std::string object representing the \c DHCID.
+///
+/// \return A string representation of \c DHCID.
+string
+DHCID::toText() const {
+ return (isc::util::encode::encodeHex(digest_));
+}
+
+/// \brief Compare two instances of \c DHCID RDATA.
+///
+/// See documentation in \c Rdata.
+int
+DHCID::compare(const Rdata& other) const {
+ const DHCID& other_dhcid = dynamic_cast<const DHCID&>(other);
+
+ size_t this_len = digest_.size();
+ size_t other_len = other_dhcid.digest_.size();
+ size_t cmplen = min(this_len, other_len);
+ int cmp = memcmp(&digest_[0], &other_dhcid.digest_[0], cmplen);
+ if (cmp != 0) {
+ return (cmp);
+ } else {
+ return ((this_len == other_len) ? 0 : (this_len < other_len) ? -1 : 1);
+ }
+}
+
+/// \brief Accessor method to get the DHCID digest
+///
+/// \return A reference to the binary DHCID data
+const std::vector<uint8_t>&
+DHCID::getDigest() const {
+ return (digest_);
+}
+
+// END_RDATA_NAMESPACE
+// END_ISC_NAMESPACE
diff --git a/src/lib/dns/rdata/in_1/dhcid_49.h b/src/lib/dns/rdata/in_1/dhcid_49.h
new file mode 100644
index 0000000..919395f
--- /dev/null
+++ b/src/lib/dns/rdata/in_1/dhcid_49.h
@@ -0,0 +1,58 @@
+// Copyright (C) 2010 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+// BEGIN_HEADER_GUARD
+
+#include <string>
+#include <vector>
+
+#include <dns/rdata.h>
+
+// BEGIN_ISC_NAMESPACE
+
+// BEGIN_COMMON_DECLARATIONS
+// END_COMMON_DECLARATIONS
+
+// BEGIN_RDATA_NAMESPACE
+
+/// \brief \c rdata::DHCID class represents the DHCID RDATA as defined %in
+/// RFC4701.
+///
+/// This class implements the basic interfaces inherited from the abstract
+/// \c rdata::Rdata class, and provides trivial accessors specific to the
+/// DHCID RDATA.
+class DHCID : public Rdata {
+public:
+ // BEGIN_COMMON_MEMBERS
+ // END_COMMON_MEMBERS
+
+ /// \brief Return the digest.
+ ///
+ /// This method never throws an exception.
+ const std::vector<uint8_t>& getDigest() const;
+
+private:
+ /// \brief Private data representation
+ ///
+ /// Opaque data at least 3 octets long as per RFC4701.
+ ///
+ std::vector<uint8_t> digest_;
+};
+// END_RDATA_NAMESPACE
+// END_ISC_NAMESPACE
+// END_HEADER_GUARD
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/dns/rdata/in_1/srv_33.h b/src/lib/dns/rdata/in_1/srv_33.h
index d067021..32b7dc0 100644
--- a/src/lib/dns/rdata/in_1/srv_33.h
+++ b/src/lib/dns/rdata/in_1/srv_33.h
@@ -12,13 +12,13 @@
// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
// PERFORMANCE OF THIS SOFTWARE.
+// BEGIN_HEADER_GUARD
+
#include <stdint.h>
#include <dns/name.h>
#include <dns/rdata.h>
-// BEGIN_HEADER_GUARD
-
// BEGIN_ISC_NAMESPACE
// BEGIN_COMMON_DECLARATIONS
diff --git a/src/lib/dns/rdata/template.cc b/src/lib/dns/rdata/template.cc
index d9f08ee..e85f82c 100644
--- a/src/lib/dns/rdata/template.cc
+++ b/src/lib/dns/rdata/template.cc
@@ -18,6 +18,7 @@
#include <dns/messagerenderer.h>
#include <dns/rdata.h>
#include <dns/rdataclass.h>
+#include <dns/rrtype.h>
using namespace std;
using namespace isc::util;
diff --git a/src/lib/dns/tests/Makefile.am b/src/lib/dns/tests/Makefile.am
index aa807fa..9171fd8 100644
--- a/src/lib/dns/tests/Makefile.am
+++ b/src/lib/dns/tests/Makefile.am
@@ -33,9 +33,10 @@ run_unittests_SOURCES += rdata_txt_like_unittest.cc
run_unittests_SOURCES += rdata_mx_unittest.cc
run_unittests_SOURCES += rdata_ptr_unittest.cc rdata_cname_unittest.cc
run_unittests_SOURCES += rdata_dname_unittest.cc
+run_unittests_SOURCES += rdata_afsdb_unittest.cc
run_unittests_SOURCES += rdata_opt_unittest.cc
run_unittests_SOURCES += rdata_dnskey_unittest.cc
-run_unittests_SOURCES += rdata_ds_unittest.cc
+run_unittests_SOURCES += rdata_ds_like_unittest.cc
run_unittests_SOURCES += rdata_nsec_unittest.cc
run_unittests_SOURCES += rdata_nsec3_unittest.cc
run_unittests_SOURCES += rdata_nsecbitmap_unittest.cc
@@ -43,7 +44,10 @@ run_unittests_SOURCES += rdata_nsec3param_unittest.cc
run_unittests_SOURCES += rdata_rrsig_unittest.cc
run_unittests_SOURCES += rdata_rp_unittest.cc
run_unittests_SOURCES += rdata_srv_unittest.cc
+run_unittests_SOURCES += rdata_minfo_unittest.cc
run_unittests_SOURCES += rdata_tsig_unittest.cc
+run_unittests_SOURCES += rdata_naptr_unittest.cc
+run_unittests_SOURCES += rdata_hinfo_unittest.cc
run_unittests_SOURCES += rrset_unittest.cc rrsetlist_unittest.cc
run_unittests_SOURCES += question_unittest.cc
run_unittests_SOURCES += rrparamregistry_unittest.cc
@@ -53,6 +57,7 @@ run_unittests_SOURCES += tsig_unittest.cc
run_unittests_SOURCES += tsigerror_unittest.cc
run_unittests_SOURCES += tsigkey_unittest.cc
run_unittests_SOURCES += tsigrecord_unittest.cc
+run_unittests_SOURCES += character_string_unittest.cc
run_unittests_SOURCES += run_unittests.cc
run_unittests_CPPFLAGS = $(AM_CPPFLAGS) $(GTEST_INCLUDES)
# We shouldn't need to include BOTAN_LDFLAGS here, but there
diff --git a/src/lib/dns/tests/character_string_unittest.cc b/src/lib/dns/tests/character_string_unittest.cc
new file mode 100644
index 0000000..5fed9eb
--- /dev/null
+++ b/src/lib/dns/tests/character_string_unittest.cc
@@ -0,0 +1,92 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+
+#include <gtest/gtest.h>
+
+#include <dns/rdata.h>
+#include <dns/tests/unittest_util.h>
+#include <dns/character_string.h>
+
+using isc::UnitTestUtil;
+
+using namespace std;
+using namespace isc;
+using namespace isc::dns;
+using namespace isc::dns::characterstr;
+using namespace isc::dns::rdata;
+
+namespace {
+
+class CharacterString {
+public:
+ CharacterString(const string& str){
+ string::const_iterator it = str.begin();
+ characterStr_ = getNextCharacterString(str, it);
+ }
+ const string& str() const { return characterStr_; }
+private:
+ string characterStr_;
+};
+
+TEST(CharacterStringTest, testNormalCase) {
+ CharacterString cstr1("foo");
+ EXPECT_EQ(string("foo"), cstr1.str());
+
+ // Test <character-string> that is separated by space
+ CharacterString cstr2("foo bar");
+ EXPECT_EQ(string("foo"), cstr2.str());
+
+ // Test <character-string> that is enclosed in quotes
+ CharacterString cstr3("\"foo bar\"");
+ EXPECT_EQ(string("foo bar"), cstr3.str());
+
+ // Test <character-string> that is not enclosed in quotes but ends with quotes
+ CharacterString cstr4("foo\"");
+ EXPECT_EQ(string("foo\""), cstr4.str());
+}
+
+TEST(CharacterStringTest, testBadCase) {
+ // The <character-string> that started with quotes should also be ended
+ // with quotes
+ EXPECT_THROW(CharacterString cstr("\"foo"), InvalidRdataText);
+
+ // The string length cannot exceed 255 characters
+ string str;
+ for (int i = 0; i < 257; ++i) {
+ str += 'A';
+ }
+ EXPECT_THROW(CharacterString cstr(str), CharStringTooLong);
+}
+
+TEST(CharacterStringTest, testEscapeCharacter) {
+ CharacterString cstr1("foo\\bar");
+ EXPECT_EQ(string("foobar"), cstr1.str());
+
+ CharacterString cstr2("foo\\\\bar");
+ EXPECT_EQ(string("foo\\bar"), cstr2.str());
+
+ CharacterString cstr3("fo\\111bar");
+ EXPECT_EQ(string("foobar"), cstr3.str());
+
+ CharacterString cstr4("fo\\1112bar");
+ EXPECT_EQ(string("foo2bar"), cstr4.str());
+
+ // There must be at least 3 digits following '\'
+ EXPECT_THROW(CharacterString cstr("foo\\98ar"), InvalidRdataText);
+ EXPECT_THROW(CharacterString cstr("foo\\9ar"), InvalidRdataText);
+ EXPECT_THROW(CharacterString cstr("foo\\98"), InvalidRdataText);
+}
+
+} // namespace
diff --git a/src/lib/dns/tests/message_unittest.cc b/src/lib/dns/tests/message_unittest.cc
index 6430626..f068791 100644
--- a/src/lib/dns/tests/message_unittest.cc
+++ b/src/lib/dns/tests/message_unittest.cc
@@ -118,16 +118,20 @@ protected:
vector<unsigned char> received_data;
vector<unsigned char> expected_data;
- void factoryFromFile(Message& message, const char* datafile);
+ void factoryFromFile(Message& message, const char* datafile,
+ Message::ParseOptions options =
+ Message::PARSE_DEFAULT);
};
void
-MessageTest::factoryFromFile(Message& message, const char* datafile) {
+MessageTest::factoryFromFile(Message& message, const char* datafile,
+ Message::ParseOptions options)
+{
received_data.clear();
UnitTestUtil::readWireData(datafile, received_data);
InputBuffer buffer(&received_data[0], received_data.size());
- message.fromWire(buffer);
+ message.fromWire(buffer, options);
}
TEST_F(MessageTest, headerFlag) {
@@ -175,7 +179,6 @@ TEST_F(MessageTest, headerFlag) {
EXPECT_THROW(message_parse.setHeaderFlag(Message::HEADERFLAG_QR),
InvalidMessageOperation);
}
-
TEST_F(MessageTest, getEDNS) {
EXPECT_FALSE(message_parse.getEDNS()); // by default EDNS isn't set
@@ -532,7 +535,46 @@ TEST_F(MessageTest, appendSection) {
}
+TEST_F(MessageTest, parseHeader) {
+ received_data.clear();
+ UnitTestUtil::readWireData("message_fromWire1", received_data);
+
+ // parseHeader() isn't allowed in the render mode.
+ InputBuffer buffer(&received_data[0], received_data.size());
+ EXPECT_THROW(message_render.parseHeader(buffer), InvalidMessageOperation);
+
+ message_parse.parseHeader(buffer);
+ EXPECT_EQ(0x1035, message_parse.getQid());
+ EXPECT_EQ(Opcode::QUERY(), message_parse.getOpcode());
+ EXPECT_EQ(Rcode::NOERROR(), message_parse.getRcode());
+ EXPECT_TRUE(message_parse.getHeaderFlag(Message::HEADERFLAG_QR));
+ EXPECT_TRUE(message_parse.getHeaderFlag(Message::HEADERFLAG_AA));
+ EXPECT_FALSE(message_parse.getHeaderFlag(Message::HEADERFLAG_TC));
+ EXPECT_TRUE(message_parse.getHeaderFlag(Message::HEADERFLAG_RD));
+ EXPECT_FALSE(message_parse.getHeaderFlag(Message::HEADERFLAG_RA));
+ EXPECT_FALSE(message_parse.getHeaderFlag(Message::HEADERFLAG_AD));
+ EXPECT_FALSE(message_parse.getHeaderFlag(Message::HEADERFLAG_CD));
+ EXPECT_EQ(1, message_parse.getRRCount(Message::SECTION_QUESTION));
+ EXPECT_EQ(2, message_parse.getRRCount(Message::SECTION_ANSWER));
+ EXPECT_EQ(0, message_parse.getRRCount(Message::SECTION_AUTHORITY));
+ EXPECT_EQ(0, message_parse.getRRCount(Message::SECTION_ADDITIONAL));
+
+ // Only the header part should have been examined.
+ EXPECT_EQ(12, buffer.getPosition()); // 12 = size of the header section
+ EXPECT_TRUE(message_parse.beginQuestion() == message_parse.endQuestion());
+ EXPECT_TRUE(message_parse.beginSection(Message::SECTION_ANSWER) ==
+ message_parse.endSection(Message::SECTION_ANSWER));
+ EXPECT_TRUE(message_parse.beginSection(Message::SECTION_AUTHORITY) ==
+ message_parse.endSection(Message::SECTION_AUTHORITY));
+ EXPECT_TRUE(message_parse.beginSection(Message::SECTION_ADDITIONAL) ==
+ message_parse.endSection(Message::SECTION_ADDITIONAL));
+}
+
TEST_F(MessageTest, fromWire) {
+ // fromWire() isn't allowed in the render mode.
+ EXPECT_THROW(factoryFromFile(message_render, "message_fromWire1"),
+ InvalidMessageOperation);
+
factoryFromFile(message_parse, "message_fromWire1");
EXPECT_EQ(0x1035, message_parse.getQid());
EXPECT_EQ(Opcode::QUERY(), message_parse.getOpcode());
@@ -564,6 +606,87 @@ TEST_F(MessageTest, fromWire) {
EXPECT_TRUE(it->isLast());
}
+TEST_F(MessageTest, fromWireShortBuffer) {
+ // We trim a valid message (ending with an SOA RR) for one byte.
+ // fromWire() should throw an exception while parsing the trimmed RR.
+ UnitTestUtil::readWireData("message_fromWire22.wire", received_data);
+ InputBuffer buffer(&received_data[0], received_data.size() - 1);
+ EXPECT_THROW(message_parse.fromWire(buffer), InvalidBufferPosition);
+}
+
+TEST_F(MessageTest, fromWireCombineRRs) {
+ // This message contains 3 RRs in the answer section in the order of
+ // A, AAAA, A types. fromWire() should combine the two A RRs into a
+ // single RRset by default.
+ factoryFromFile(message_parse, "message_fromWire19.wire");
+
+ RRsetIterator it = message_parse.beginSection(Message::SECTION_ANSWER);
+ RRsetIterator it_end = message_parse.endSection(Message::SECTION_ANSWER);
+ ASSERT_TRUE(it != it_end);
+ EXPECT_EQ(RRType::A(), (*it)->getType());
+ EXPECT_EQ(2, (*it)->getRdataCount());
+
+ ++it;
+ ASSERT_TRUE(it != it_end);
+ EXPECT_EQ(RRType::AAAA(), (*it)->getType());
+ EXPECT_EQ(1, (*it)->getRdataCount());
+}
+
+// A helper function for a test pattern commonly used in several tests below.
+void
+preserveRRCheck(const Message& message, Message::Section section) {
+ RRsetIterator it = message.beginSection(section);
+ RRsetIterator it_end = message.endSection(section);
+ ASSERT_TRUE(it != it_end);
+ EXPECT_EQ(RRType::A(), (*it)->getType());
+ EXPECT_EQ(1, (*it)->getRdataCount());
+ EXPECT_EQ("192.0.2.1", (*it)->getRdataIterator()->getCurrent().toText());
+
+ ++it;
+ ASSERT_TRUE(it != it_end);
+ EXPECT_EQ(RRType::AAAA(), (*it)->getType());
+ EXPECT_EQ(1, (*it)->getRdataCount());
+ EXPECT_EQ("2001:db8::1", (*it)->getRdataIterator()->getCurrent().toText());
+
+ ++it;
+ ASSERT_TRUE(it != it_end);
+ EXPECT_EQ(RRType::A(), (*it)->getType());
+ EXPECT_EQ(1, (*it)->getRdataCount());
+ EXPECT_EQ("192.0.2.2", (*it)->getRdataIterator()->getCurrent().toText());
+}
+
+TEST_F(MessageTest, fromWirePreserveAnswer) {
+ // Using the same data as the previous test, but specify the PRESERVE_ORDER
+ // option. The received order of RRs should be preserved, and each RR
+ // should be stored in a single RRset.
+ factoryFromFile(message_parse, "message_fromWire19.wire",
+ Message::PRESERVE_ORDER);
+ {
+ SCOPED_TRACE("preserve answer RRs");
+ preserveRRCheck(message_parse, Message::SECTION_ANSWER);
+ }
+}
+
+TEST_F(MessageTest, fromWirePreserveAuthority) {
+ // Same for the previous test, but for the authority section.
+ factoryFromFile(message_parse, "message_fromWire20.wire",
+ Message::PRESERVE_ORDER);
+ {
+ SCOPED_TRACE("preserve authority RRs");
+ preserveRRCheck(message_parse, Message::SECTION_AUTHORITY);
+ }
+}
+
+TEST_F(MessageTest, fromWirePreserveAdditional) {
+ // Same for the previous test, but for the additional section.
+ factoryFromFile(message_parse, "message_fromWire21.wire",
+ Message::PRESERVE_ORDER);
+ {
+ SCOPED_TRACE("preserve additional RRs");
+ preserveRRCheck(message_parse, Message::SECTION_ADDITIONAL);
+ }
+}
+
TEST_F(MessageTest, EDNS0ExtRcode) {
// Extended Rcode = BADVERS
factoryFromFile(message_parse, "message_fromWire10.wire");
diff --git a/src/lib/dns/tests/rdata_afsdb_unittest.cc b/src/lib/dns/tests/rdata_afsdb_unittest.cc
new file mode 100644
index 0000000..7df8d83
--- /dev/null
+++ b/src/lib/dns/tests/rdata_afsdb_unittest.cc
@@ -0,0 +1,210 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <util/buffer.h>
+#include <dns/exceptions.h>
+#include <dns/messagerenderer.h>
+#include <dns/rdata.h>
+#include <dns/rdataclass.h>
+#include <dns/rrclass.h>
+#include <dns/rrtype.h>
+
+#include <gtest/gtest.h>
+
+#include <dns/tests/unittest_util.h>
+#include <dns/tests/rdata_unittest.h>
+
+using isc::UnitTestUtil;
+using namespace std;
+using namespace isc::dns;
+using namespace isc::util;
+using namespace isc::dns::rdata;
+
+const char* const afsdb_text = "1 afsdb.example.com.";
+const char* const afsdb_text2 = "0 root.example.com.";
+const char* const too_long_label("012345678901234567890123456789"
+ "0123456789012345678901234567890123");
+
+namespace {
+class Rdata_AFSDB_Test : public RdataTest {
+protected:
+ Rdata_AFSDB_Test() :
+ rdata_afsdb(string(afsdb_text)), rdata_afsdb2(string(afsdb_text2))
+ {}
+
+ const generic::AFSDB rdata_afsdb;
+ const generic::AFSDB rdata_afsdb2;
+ vector<uint8_t> expected_wire;
+};
+
+
+TEST_F(Rdata_AFSDB_Test, createFromText) {
+ EXPECT_EQ(1, rdata_afsdb.getSubtype());
+ EXPECT_EQ(Name("afsdb.example.com."), rdata_afsdb.getServer());
+
+ EXPECT_EQ(0, rdata_afsdb2.getSubtype());
+ EXPECT_EQ(Name("root.example.com."), rdata_afsdb2.getServer());
+}
+
+TEST_F(Rdata_AFSDB_Test, badText) {
+ // subtype is too large
+ EXPECT_THROW(const generic::AFSDB rdata_afsdb("99999999 afsdb.example.com."),
+ InvalidRdataText);
+ // incomplete text
+ EXPECT_THROW(const generic::AFSDB rdata_afsdb("10"), InvalidRdataText);
+ EXPECT_THROW(const generic::AFSDB rdata_afsdb("SPOON"), InvalidRdataText);
+ EXPECT_THROW(const generic::AFSDB rdata_afsdb("1root.example.com."), InvalidRdataText);
+ // number of fields (must be 2) is incorrect
+ EXPECT_THROW(const generic::AFSDB rdata_afsdb("10 afsdb. example.com."),
+ InvalidRdataText);
+ // bad name
+ EXPECT_THROW(const generic::AFSDB rdata_afsdb("1 afsdb.example.com." +
+ string(too_long_label)), TooLongLabel);
+}
+
+TEST_F(Rdata_AFSDB_Test, assignment) {
+ generic::AFSDB copy((string(afsdb_text2)));
+ copy = rdata_afsdb;
+ EXPECT_EQ(0, copy.compare(rdata_afsdb));
+
+ // Check if the copied data is valid even after the original is deleted
+ generic::AFSDB* copy2 = new generic::AFSDB(rdata_afsdb);
+ generic::AFSDB copy3((string(afsdb_text2)));
+ copy3 = *copy2;
+ delete copy2;
+ EXPECT_EQ(0, copy3.compare(rdata_afsdb));
+
+ // Self assignment
+ copy = copy;
+ EXPECT_EQ(0, copy.compare(rdata_afsdb));
+}
+
+TEST_F(Rdata_AFSDB_Test, createFromWire) {
+ // uncompressed names
+ EXPECT_EQ(0, rdata_afsdb.compare(
+ *rdataFactoryFromFile(RRType::AFSDB(), RRClass::IN(),
+ "rdata_afsdb_fromWire1.wire")));
+ // compressed name
+ EXPECT_EQ(0, rdata_afsdb.compare(
+ *rdataFactoryFromFile(RRType::AFSDB(), RRClass::IN(),
+ "rdata_afsdb_fromWire2.wire", 13)));
+ // RDLENGTH is too short
+ EXPECT_THROW(rdataFactoryFromFile(RRType::AFSDB(), RRClass::IN(),
+ "rdata_afsdb_fromWire3.wire"),
+ InvalidRdataLength);
+ // RDLENGTH is too long
+ EXPECT_THROW(rdataFactoryFromFile(RRType::AFSDB(), RRClass::IN(),
+ "rdata_afsdb_fromWire4.wire"),
+ InvalidRdataLength);
+ // bogus server name, the error should be detected in the name
+ // constructor
+ EXPECT_THROW(rdataFactoryFromFile(RRType::AFSDB(), RRClass::IN(),
+ "rdata_afsdb_fromWire5.wire"),
+ DNSMessageFORMERR);
+}
+
+TEST_F(Rdata_AFSDB_Test, toWireBuffer) {
+ // construct actual data
+ rdata_afsdb.toWire(obuffer);
+
+ // construct expected data
+ UnitTestUtil::readWireData("rdata_afsdb_toWire1.wire", expected_wire);
+
+ // then compare them
+ EXPECT_PRED_FORMAT4(UnitTestUtil::matchWireData,
+ obuffer.getData(), obuffer.getLength(),
+ &expected_wire[0], expected_wire.size());
+
+ // clear buffer for the next test
+ obuffer.clear();
+
+ // construct actual data
+ Name("example.com.").toWire(obuffer);
+ rdata_afsdb2.toWire(obuffer);
+
+ // construct expected data
+ UnitTestUtil::readWireData("rdata_afsdb_toWire2.wire", expected_wire);
+
+ // then compare them
+ EXPECT_PRED_FORMAT4(UnitTestUtil::matchWireData,
+ obuffer.getData(), obuffer.getLength(),
+ &expected_wire[0], expected_wire.size());
+}
+
+TEST_F(Rdata_AFSDB_Test, toWireRenderer) {
+ // similar to toWireBuffer, but names in RDATA could be compressed due to
+ // preceding names. Actually they must not be compressed according to
+ // RFC3597, and this test checks that.
+
+ // construct actual data
+ rdata_afsdb.toWire(renderer);
+
+ // construct expected data
+ UnitTestUtil::readWireData("rdata_afsdb_toWire1.wire", expected_wire);
+
+ // then compare them
+ EXPECT_PRED_FORMAT4(UnitTestUtil::matchWireData,
+ renderer.getData(), renderer.getLength(),
+ &expected_wire[0], expected_wire.size());
+
+ // clear renderer for the next test
+ renderer.clear();
+
+ // construct actual data
+ Name("example.com.").toWire(obuffer);
+ rdata_afsdb2.toWire(renderer);
+
+ // construct expected data
+ UnitTestUtil::readWireData("rdata_afsdb_toWire2.wire", expected_wire);
+
+ // then compare them
+ EXPECT_PRED_FORMAT4(UnitTestUtil::matchWireData,
+ renderer.getData(), renderer.getLength(),
+ &expected_wire[0], expected_wire.size());
+}
+
+TEST_F(Rdata_AFSDB_Test, toText) {
+ EXPECT_EQ(afsdb_text, rdata_afsdb.toText());
+ EXPECT_EQ(afsdb_text2, rdata_afsdb2.toText());
+}
+
+TEST_F(Rdata_AFSDB_Test, compare) {
+ // check reflexivity
+ EXPECT_EQ(0, rdata_afsdb.compare(rdata_afsdb));
+
+ // name must be compared in case-insensitive manner
+ EXPECT_EQ(0, rdata_afsdb.compare(generic::AFSDB("1 "
+ "AFSDB.example.com.")));
+
+ const generic::AFSDB small1("10 afsdb.example.com");
+ const generic::AFSDB large1("65535 afsdb.example.com");
+ const generic::AFSDB large2("256 afsdb.example.com");
+
+ // confirm these are compared as unsigned values
+ EXPECT_GT(0, rdata_afsdb.compare(large1));
+ EXPECT_LT(0, large1.compare(rdata_afsdb));
+
+ // confirm these are compared in network byte order
+ EXPECT_GT(0, small1.compare(large2));
+ EXPECT_LT(0, large2.compare(small1));
+
+ // another AFSDB whose server name is larger than that of rdata_afsdb.
+ const generic::AFSDB large3("256 zzzzz.example.com");
+ EXPECT_GT(0, large2.compare(large3));
+ EXPECT_LT(0, large3.compare(large2));
+
+ // comparison attempt between incompatible RR types should be rejected
+ EXPECT_THROW(rdata_afsdb.compare(*rdata_nomatch), bad_cast);
+}
+}
diff --git a/src/lib/dns/tests/rdata_ds_like_unittest.cc b/src/lib/dns/tests/rdata_ds_like_unittest.cc
new file mode 100644
index 0000000..9b29446
--- /dev/null
+++ b/src/lib/dns/tests/rdata_ds_like_unittest.cc
@@ -0,0 +1,171 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <algorithm>
+#include <string>
+
+#include <util/buffer.h>
+#include <dns/messagerenderer.h>
+#include <dns/rdata.h>
+#include <dns/rdataclass.h>
+#include <dns/rrclass.h>
+#include <dns/rrtype.h>
+
+#include <gtest/gtest.h>
+
+#include <dns/tests/unittest_util.h>
+#include <dns/tests/rdata_unittest.h>
+
+using isc::UnitTestUtil;
+using namespace std;
+using namespace isc::dns;
+using namespace isc::util;
+using namespace isc::dns::rdata;
+
+namespace {
+// hacks to make templates work
+template <class T>
+class RRTYPE : public RRType {
+public:
+ RRTYPE();
+};
+
+template<> RRTYPE<generic::DS>::RRTYPE() : RRType(RRType::DS()) {}
+template<> RRTYPE<generic::DLV>::RRTYPE() : RRType(RRType::DLV()) {}
+
+template <class DS_LIKE>
+class Rdata_DS_LIKE_Test : public RdataTest {
+protected:
+ static DS_LIKE const rdata_ds_like;
+};
+
+string ds_like_txt("12892 5 2 F1E184C0E1D615D20EB3C223ACED3B03C773DD952D"
+ "5F0EB5C777586DE18DA6B5");
+
+template <class DS_LIKE>
+DS_LIKE const Rdata_DS_LIKE_Test<DS_LIKE>::rdata_ds_like(ds_like_txt);
+
+// The list of types we want to test.
+typedef testing::Types<generic::DS, generic::DLV> Implementations;
+
+TYPED_TEST_CASE(Rdata_DS_LIKE_Test, Implementations);
+
+TYPED_TEST(Rdata_DS_LIKE_Test, toText_DS_LIKE) {
+ EXPECT_EQ(ds_like_txt, this->rdata_ds_like.toText());
+}
+
+TYPED_TEST(Rdata_DS_LIKE_Test, badText_DS_LIKE) {
+ EXPECT_THROW(const TypeParam ds_like2("99999 5 2 BEEF"), InvalidRdataText);
+ EXPECT_THROW(const TypeParam ds_like2("11111 555 2 BEEF"),
+ InvalidRdataText);
+ EXPECT_THROW(const TypeParam ds_like2("11111 5 22222 BEEF"),
+ InvalidRdataText);
+ EXPECT_THROW(const TypeParam ds_like2("11111 5 2"), InvalidRdataText);
+ EXPECT_THROW(const TypeParam ds_like2("GARBAGE IN"), InvalidRdataText);
+ // no space between the digest type and the digest.
+ EXPECT_THROW(const TypeParam ds_like2(
+ "12892 5 2F1E184C0E1D615D20EB3C223ACED3B03C773DD952D"
+ "5F0EB5C777586DE18DA6B5"), InvalidRdataText);
+}
+
+TYPED_TEST(Rdata_DS_LIKE_Test, createFromWire_DS_LIKE) {
+ EXPECT_EQ(0, this->rdata_ds_like.compare(
+ *this->rdataFactoryFromFile(RRTYPE<TypeParam>(), RRClass::IN(),
+ "rdata_ds_fromWire")));
+}
+
+TYPED_TEST(Rdata_DS_LIKE_Test, assignment_DS_LIKE) {
+ TypeParam copy((string(ds_like_txt)));
+ copy = this->rdata_ds_like;
+ EXPECT_EQ(0, copy.compare(this->rdata_ds_like));
+
+ // Check if the copied data is valid even after the original is deleted
+ TypeParam* copy2 = new TypeParam(this->rdata_ds_like);
+ TypeParam copy3((string(ds_like_txt)));
+ copy3 = *copy2;
+ delete copy2;
+ EXPECT_EQ(0, copy3.compare(this->rdata_ds_like));
+
+ // Self assignment
+ copy = copy;
+ EXPECT_EQ(0, copy.compare(this->rdata_ds_like));
+}
+
+TYPED_TEST(Rdata_DS_LIKE_Test, getTag_DS_LIKE) {
+ EXPECT_EQ(12892, this->rdata_ds_like.getTag());
+}
+
+TYPED_TEST(Rdata_DS_LIKE_Test, toWireRenderer) {
+ Rdata_DS_LIKE_Test<TypeParam>::renderer.skip(2);
+ TypeParam rdata_ds_like(ds_like_txt);
+ rdata_ds_like.toWire(this->renderer);
+
+ vector<unsigned char> data;
+ UnitTestUtil::readWireData("rdata_ds_fromWire", data);
+ EXPECT_PRED_FORMAT4(UnitTestUtil::matchWireData,
+ static_cast<const uint8_t*>
+ (this->obuffer.getData()) + 2,
+ this->obuffer.getLength() - 2,
+ &data[2], data.size() - 2);
+}
+
+TYPED_TEST(Rdata_DS_LIKE_Test, toWireBuffer) {
+ TypeParam rdata_ds_like(ds_like_txt);
+ rdata_ds_like.toWire(this->obuffer);
+}
+
+string ds_like_txt1("12892 5 2 F1E184C0E1D615D20EB3C223ACED3B03C773DD952D"
+ "5F0EB5C777586DE18DA6B5");
+// different tag
+string ds_like_txt2("12893 5 2 F1E184C0E1D615D20EB3C223ACED3B03C773DD952D"
+ "5F0EB5C777586DE18DA6B5");
+// different algorithm
+string ds_like_txt3("12892 6 2 F1E184C0E1D615D20EB3C223ACED3B03C773DD952D"
+ "5F0EB5C777586DE18DA6B5");
+// different digest type
+string ds_like_txt4("12892 5 3 F1E184C0E1D615D20EB3C223ACED3B03C773DD952D"
+ "5F0EB5C777586DE18DA6B5");
+// different digest
+string ds_like_txt5("12892 5 2 F2E184C0E1D615D20EB3C223ACED3B03C773DD952D"
+ "5F0EB5C777586DE18DA6B5");
+// different digest length
+string ds_like_txt6("12892 5 2 F2E184C0E1D615D20EB3C223ACED3B03C773DD952D"
+ "5F0EB5C777586DE18DA6B555");
+
+TYPED_TEST(Rdata_DS_LIKE_Test, compare) {
+ // trivial case: self equivalence
+ EXPECT_EQ(0, TypeParam(ds_like_txt).compare(TypeParam(ds_like_txt)));
+
+ // non-equivalence tests
+ EXPECT_LT(TypeParam(ds_like_txt1).compare(TypeParam(ds_like_txt2)), 0);
+ EXPECT_GT(TypeParam(ds_like_txt2).compare(TypeParam(ds_like_txt1)), 0);
+
+ EXPECT_LT(TypeParam(ds_like_txt1).compare(TypeParam(ds_like_txt3)), 0);
+ EXPECT_GT(TypeParam(ds_like_txt3).compare(TypeParam(ds_like_txt1)), 0);
+
+ EXPECT_LT(TypeParam(ds_like_txt1).compare(TypeParam(ds_like_txt4)), 0);
+ EXPECT_GT(TypeParam(ds_like_txt4).compare(TypeParam(ds_like_txt1)), 0);
+
+ EXPECT_LT(TypeParam(ds_like_txt1).compare(TypeParam(ds_like_txt5)), 0);
+ EXPECT_GT(TypeParam(ds_like_txt5).compare(TypeParam(ds_like_txt1)), 0);
+
+ EXPECT_LT(TypeParam(ds_like_txt1).compare(TypeParam(ds_like_txt6)), 0);
+ EXPECT_GT(TypeParam(ds_like_txt6).compare(TypeParam(ds_like_txt1)), 0);
+
+ // comparison attempt between incompatible RR types should be rejected
+ EXPECT_THROW(this->rdata_ds_like.compare(*this->rdata_nomatch),
+ bad_cast);
+}
+
+}
diff --git a/src/lib/dns/tests/rdata_ds_unittest.cc b/src/lib/dns/tests/rdata_ds_unittest.cc
deleted file mode 100644
index 5988620..0000000
--- a/src/lib/dns/tests/rdata_ds_unittest.cc
+++ /dev/null
@@ -1,99 +0,0 @@
-// Copyright (C) 2010 Internet Systems Consortium, Inc. ("ISC")
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
-// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
-// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
-// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
-// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
-// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
-// PERFORMANCE OF THIS SOFTWARE.
-
-#include <string>
-
-#include <util/buffer.h>
-#include <dns/messagerenderer.h>
-#include <dns/rdata.h>
-#include <dns/rdataclass.h>
-#include <dns/rrclass.h>
-#include <dns/rrtype.h>
-
-#include <gtest/gtest.h>
-
-#include <dns/tests/unittest_util.h>
-#include <dns/tests/rdata_unittest.h>
-
-using isc::UnitTestUtil;
-using namespace std;
-using namespace isc::dns;
-using namespace isc::util;
-using namespace isc::dns::rdata;
-
-namespace {
-class Rdata_DS_Test : public RdataTest {
- // there's nothing to specialize
-};
-
-string ds_txt("12892 5 2 F1E184C0E1D615D20EB3C223ACED3B03C773DD952D"
- "5F0EB5C777586DE18DA6B5");
-const generic::DS rdata_ds(ds_txt);
-
-TEST_F(Rdata_DS_Test, toText_DS) {
- EXPECT_EQ(ds_txt, rdata_ds.toText());
-}
-
-TEST_F(Rdata_DS_Test, badText_DS) {
- EXPECT_THROW(const generic::DS ds2("99999 5 2 BEEF"), InvalidRdataText);
- EXPECT_THROW(const generic::DS ds2("11111 555 2 BEEF"), InvalidRdataText);
- EXPECT_THROW(const generic::DS ds2("11111 5 22222 BEEF"), InvalidRdataText);
- EXPECT_THROW(const generic::DS ds2("11111 5 2"), InvalidRdataText);
- EXPECT_THROW(const generic::DS ds2("GARBAGE IN"), InvalidRdataText);
-}
-
-// this test currently fails; we must fix it, and then migrate the test to
-// badText_DS
-TEST_F(Rdata_DS_Test, DISABLED_badText_DS) {
- // no space between the digest type and the digest.
- EXPECT_THROW(const generic::DS ds2(
- "12892 5 2F1E184C0E1D615D20EB3C223ACED3B03C773DD952D"
- "5F0EB5C777586DE18DA6B5"), InvalidRdataText);
-}
-
-TEST_F(Rdata_DS_Test, createFromWire_DS) {
- EXPECT_EQ(0, rdata_ds.compare(
- *rdataFactoryFromFile(RRType::DS(), RRClass::IN(),
- "rdata_ds_fromWire")));
-}
-
-TEST_F(Rdata_DS_Test, getTag_DS) {
- EXPECT_EQ(12892, rdata_ds.getTag());
-}
-
-TEST_F(Rdata_DS_Test, toWireRenderer) {
- renderer.skip(2);
- generic::DS rdata_ds(ds_txt);
- rdata_ds.toWire(renderer);
-
- vector<unsigned char> data;
- UnitTestUtil::readWireData("rdata_ds_fromWire", data);
- EXPECT_PRED_FORMAT4(UnitTestUtil::matchWireData,
- static_cast<const uint8_t *>(obuffer.getData()) + 2,
- obuffer.getLength() - 2, &data[2], data.size() - 2);
-}
-
-TEST_F(Rdata_DS_Test, toWireBuffer) {
- generic::DS rdata_ds(ds_txt);
- rdata_ds.toWire(obuffer);
-}
-
-TEST_F(Rdata_DS_Test, compare) {
- // trivial case: self equivalence
- EXPECT_EQ(0, generic::DS(ds_txt).compare(generic::DS(ds_txt)));
-
- // TODO: need more tests
-}
-
-}
diff --git a/src/lib/dns/tests/rdata_hinfo_unittest.cc b/src/lib/dns/tests/rdata_hinfo_unittest.cc
new file mode 100644
index 0000000..c52b2a0
--- /dev/null
+++ b/src/lib/dns/tests/rdata_hinfo_unittest.cc
@@ -0,0 +1,115 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <util/buffer.h>
+#include <dns/exceptions.h>
+#include <dns/messagerenderer.h>
+#include <dns/rdata.h>
+#include <dns/rdataclass.h>
+#include <dns/rrclass.h>
+#include <dns/rrtype.h>
+
+#include <gtest/gtest.h>
+
+#include <dns/tests/unittest_util.h>
+#include <dns/tests/rdata_unittest.h>
+
+using isc::UnitTestUtil;
+using namespace std;
+using namespace isc::dns;
+using namespace isc::util;
+using namespace isc::dns::rdata;
+using namespace isc::dns::rdata::generic;
+
+namespace {
+class Rdata_HINFO_Test : public RdataTest {
+};
+
+static uint8_t hinfo_rdata[] = {0x07,0x50,0x65,0x6e,0x74,0x69,0x75,0x6d,0x05,
+ 0x4c,0x69,0x6e,0x75,0x78};
+static const char *hinfo_str = "\"Pentium\" \"Linux\"";
+static const char *hinfo_str1 = "\"Pen\\\"tium\" \"Linux\"";
+
+static const char *hinfo_str_small1 = "\"Lentium\" \"Linux\"";
+static const char *hinfo_str_small2 = "\"Pentium\" \"Kinux\"";
+static const char *hinfo_str_large1 = "\"Qentium\" \"Linux\"";
+static const char *hinfo_str_large2 = "\"Pentium\" \"UNIX\"";
+
+TEST_F(Rdata_HINFO_Test, createFromText) {
+ HINFO hinfo(hinfo_str);
+ EXPECT_EQ(string("Pentium"), hinfo.getCPU());
+ EXPECT_EQ(string("Linux"), hinfo.getOS());
+
+ // Test the text with double quotes in the middle of the string
+ HINFO hinfo1(hinfo_str1);
+ EXPECT_EQ(string("Pen\"tium"), hinfo1.getCPU());
+}
+
+TEST_F(Rdata_HINFO_Test, badText) {
+ // Fields must be separated by spaces
+ EXPECT_THROW(const HINFO hinfo("\"Pentium\"\"Linux\""), InvalidRdataText);
+ // Field cannot be missing
+ EXPECT_THROW(const HINFO hinfo("Pentium"), InvalidRdataText);
+ // The <character-string> cannot exceed 255 characters
+ string hinfo_str;
+ for (int i = 0; i < 257; ++i) {
+ hinfo_str += 'A';
+ }
+ hinfo_str += " Linux";
+ EXPECT_THROW(const HINFO hinfo(hinfo_str), CharStringTooLong);
+}
+
+TEST_F(Rdata_HINFO_Test, createFromWire) {
+ InputBuffer input_buffer(hinfo_rdata, sizeof(hinfo_rdata));
+ HINFO hinfo(input_buffer, sizeof(hinfo_rdata));
+ EXPECT_EQ(string("Pentium"), hinfo.getCPU());
+ EXPECT_EQ(string("Linux"), hinfo.getOS());
+}
+
+TEST_F(Rdata_HINFO_Test, toText) {
+ HINFO hinfo(hinfo_str);
+ EXPECT_EQ(hinfo_str, hinfo.toText());
+}
+
+TEST_F(Rdata_HINFO_Test, toWire) {
+ HINFO hinfo(hinfo_str);
+ hinfo.toWire(obuffer);
+
+ EXPECT_PRED_FORMAT4(UnitTestUtil::matchWireData, obuffer.getData(),
+ obuffer.getLength(), hinfo_rdata, sizeof(hinfo_rdata));
+}
+
+TEST_F(Rdata_HINFO_Test, toWireRenderer) {
+ HINFO hinfo(hinfo_str);
+
+ hinfo.toWire(renderer);
+ EXPECT_PRED_FORMAT4(UnitTestUtil::matchWireData, obuffer.getData(),
+ obuffer.getLength(), hinfo_rdata, sizeof(hinfo_rdata));
+}
+
+TEST_F(Rdata_HINFO_Test, compare) {
+ HINFO hinfo(hinfo_str);
+ HINFO hinfo_small1(hinfo_str_small1);
+ HINFO hinfo_small2(hinfo_str_small2);
+ HINFO hinfo_large1(hinfo_str_large1);
+ HINFO hinfo_large2(hinfo_str_large2);
+
+ EXPECT_EQ(0, hinfo.compare(HINFO(hinfo_str)));
+ EXPECT_EQ(1, hinfo.compare(HINFO(hinfo_str_small1)));
+ EXPECT_EQ(1, hinfo.compare(HINFO(hinfo_str_small2)));
+ EXPECT_EQ(-1, hinfo.compare(HINFO(hinfo_str_large1)));
+ EXPECT_EQ(-1, hinfo.compare(HINFO(hinfo_str_large2)));
+}
+
+}
diff --git a/src/lib/dns/tests/rdata_minfo_unittest.cc b/src/lib/dns/tests/rdata_minfo_unittest.cc
new file mode 100644
index 0000000..30c7c39
--- /dev/null
+++ b/src/lib/dns/tests/rdata_minfo_unittest.cc
@@ -0,0 +1,184 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <util/buffer.h>
+#include <dns/exceptions.h>
+#include <dns/messagerenderer.h>
+#include <dns/rdata.h>
+#include <dns/rdataclass.h>
+#include <dns/rrclass.h>
+#include <dns/rrtype.h>
+
+#include <gtest/gtest.h>
+
+#include <dns/tests/unittest_util.h>
+#include <dns/tests/rdata_unittest.h>
+
+using isc::UnitTestUtil;
+using namespace std;
+using namespace isc::dns;
+using namespace isc::util;
+using namespace isc::dns::rdata;
+
+// minfo text
+const char* const minfo_txt = "rmailbox.example.com. emailbox.example.com.";
+const char* const minfo_txt2 = "root.example.com. emailbox.example.com.";
+const char* const too_long_label = "01234567890123456789012345678901234567"
+ "89012345678901234567890123";
+
+namespace {
+class Rdata_MINFO_Test : public RdataTest {
+public:
+ Rdata_MINFO_Test():
+ rdata_minfo(string(minfo_txt)), rdata_minfo2(string(minfo_txt2)) {}
+
+ const generic::MINFO rdata_minfo;
+ const generic::MINFO rdata_minfo2;
+};
+
+
+TEST_F(Rdata_MINFO_Test, createFromText) {
+ EXPECT_EQ(Name("rmailbox.example.com."), rdata_minfo.getRmailbox());
+ EXPECT_EQ(Name("emailbox.example.com."), rdata_minfo.getEmailbox());
+
+ EXPECT_EQ(Name("root.example.com."), rdata_minfo2.getRmailbox());
+ EXPECT_EQ(Name("emailbox.example.com."), rdata_minfo2.getEmailbox());
+}
+
+TEST_F(Rdata_MINFO_Test, badText) {
+ // incomplete text
+ EXPECT_THROW(generic::MINFO("root.example.com."),
+ InvalidRdataText);
+ // number of fields (must be 2) is incorrect
+ EXPECT_THROW(generic::MINFO("root.example.com emailbox.example.com. "
+ "example.com."),
+ InvalidRdataText);
+ // bad rmailbox name
+ EXPECT_THROW(generic::MINFO("root.example.com. emailbox.example.com." +
+ string(too_long_label)),
+ TooLongLabel);
+ // bad emailbox name
+ EXPECT_THROW(generic::MINFO("root.example.com." +
+ string(too_long_label) + " emailbox.example.com."),
+ TooLongLabel);
+}
+
+TEST_F(Rdata_MINFO_Test, createFromWire) {
+ // uncompressed names
+ EXPECT_EQ(0, rdata_minfo.compare(
+ *rdataFactoryFromFile(RRType::MINFO(), RRClass::IN(),
+ "rdata_minfo_fromWire1.wire")));
+ // compressed names
+ EXPECT_EQ(0, rdata_minfo.compare(
+ *rdataFactoryFromFile(RRType::MINFO(), RRClass::IN(),
+ "rdata_minfo_fromWire2.wire", 15)));
+ // RDLENGTH is too short
+ EXPECT_THROW(rdataFactoryFromFile(RRType::MINFO(), RRClass::IN(),
+ "rdata_minfo_fromWire3.wire"),
+ InvalidRdataLength);
+ // RDLENGTH is too long
+ EXPECT_THROW(rdataFactoryFromFile(RRType::MINFO(), RRClass::IN(),
+ "rdata_minfo_fromWire4.wire"),
+ InvalidRdataLength);
+ // bogus rmailbox name, the error should be detected in the name
+ // constructor
+ EXPECT_THROW(rdataFactoryFromFile(RRType::MINFO(), RRClass::IN(),
+ "rdata_minfo_fromWire5.wire"),
+ DNSMessageFORMERR);
+ // bogus emailbox name, the error should be detected in the name
+ // constructor
+ EXPECT_THROW(rdataFactoryFromFile(RRType::MINFO(), RRClass::IN(),
+ "rdata_minfo_fromWire6.wire"),
+ DNSMessageFORMERR);
+}
+
+TEST_F(Rdata_MINFO_Test, assignment) {
+ generic::MINFO copy((string(minfo_txt2)));
+ copy = rdata_minfo;
+ EXPECT_EQ(0, copy.compare(rdata_minfo));
+
+ // Check if the copied data is valid even after the original is deleted
+ generic::MINFO* copy2 = new generic::MINFO(rdata_minfo);
+ generic::MINFO copy3((string(minfo_txt2)));
+ copy3 = *copy2;
+ delete copy2;
+ EXPECT_EQ(0, copy3.compare(rdata_minfo));
+
+ // Self assignment
+ copy = copy;
+ EXPECT_EQ(0, copy.compare(rdata_minfo));
+}
+
+TEST_F(Rdata_MINFO_Test, toWireBuffer) {
+ rdata_minfo.toWire(obuffer);
+ vector<unsigned char> data;
+ UnitTestUtil::readWireData("rdata_minfo_toWireUncompressed1.wire", data);
+ EXPECT_PRED_FORMAT4(UnitTestUtil::matchWireData,
+ static_cast<const uint8_t *>(obuffer.getData()),
+ obuffer.getLength(), &data[0], data.size());
+
+ obuffer.clear();
+ rdata_minfo2.toWire(obuffer);
+ vector<unsigned char> data2;
+ UnitTestUtil::readWireData("rdata_minfo_toWireUncompressed2.wire", data2);
+ EXPECT_PRED_FORMAT4(UnitTestUtil::matchWireData,
+ static_cast<const uint8_t *>(obuffer.getData()),
+ obuffer.getLength(), &data2[0], data2.size());
+}
+
+TEST_F(Rdata_MINFO_Test, toWireRenderer) {
+ rdata_minfo.toWire(renderer);
+ vector<unsigned char> data;
+ UnitTestUtil::readWireData("rdata_minfo_toWire1.wire", data);
+ EXPECT_PRED_FORMAT4(UnitTestUtil::matchWireData,
+ static_cast<const uint8_t *>(obuffer.getData()),
+ obuffer.getLength(), &data[0], data.size());
+ renderer.clear();
+ rdata_minfo2.toWire(renderer);
+ vector<unsigned char> data2;
+ UnitTestUtil::readWireData("rdata_minfo_toWire2.wire", data2);
+ EXPECT_PRED_FORMAT4(UnitTestUtil::matchWireData,
+ static_cast<const uint8_t *>(obuffer.getData()),
+ obuffer.getLength(), &data2[0], data2.size());
+}
+
+TEST_F(Rdata_MINFO_Test, toText) {
+ EXPECT_EQ(minfo_txt, rdata_minfo.toText());
+ EXPECT_EQ(minfo_txt2, rdata_minfo2.toText());
+}
+
+TEST_F(Rdata_MINFO_Test, compare) {
+ // check reflexivity
+ EXPECT_EQ(0, rdata_minfo.compare(rdata_minfo));
+
+ // names must be compared in case-insensitive manner
+ EXPECT_EQ(0, rdata_minfo.compare(generic::MINFO("RMAILBOX.example.com. "
+ "emailbox.EXAMPLE.com.")));
+
+ // another MINFO whose rmailbox name is larger than that of rdata_minfo.
+ const generic::MINFO large1_minfo("zzzzzzzz.example.com. "
+ "emailbox.example.com.");
+ EXPECT_GT(0, rdata_minfo.compare(large1_minfo));
+ EXPECT_LT(0, large1_minfo.compare(rdata_minfo));
+
+ // another MINFO whose emailbox name is larger than that of rdata_minfo.
+ const generic::MINFO large2_minfo("rmailbox.example.com. "
+ "zzzzzzzzzzz.example.com.");
+ EXPECT_GT(0, rdata_minfo.compare(large2_minfo));
+ EXPECT_LT(0, large2_minfo.compare(rdata_minfo));
+
+ // comparison attempt between incompatible RR types should be rejected
+ EXPECT_THROW(rdata_minfo.compare(*RdataTest::rdata_nomatch), bad_cast);
+}
+}
diff --git a/src/lib/dns/tests/rdata_naptr_unittest.cc b/src/lib/dns/tests/rdata_naptr_unittest.cc
new file mode 100644
index 0000000..f905943
--- /dev/null
+++ b/src/lib/dns/tests/rdata_naptr_unittest.cc
@@ -0,0 +1,178 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <util/buffer.h>
+#include <dns/exceptions.h>
+#include <dns/messagerenderer.h>
+#include <dns/rdata.h>
+#include <dns/rdataclass.h>
+#include <dns/rrclass.h>
+#include <dns/rrtype.h>
+
+#include <gtest/gtest.h>
+
+#include <dns/tests/unittest_util.h>
+#include <dns/tests/rdata_unittest.h>
+
+using isc::UnitTestUtil;
+using namespace std;
+using namespace isc::dns;
+using namespace isc::util;
+using namespace isc::dns::rdata;
+using namespace isc::dns::rdata::generic;
+
+namespace {
+class Rdata_NAPTR_Test : public RdataTest {
+};
+
+// 10 100 "S" "SIP+D2U" "" _sip._udp.example.com.
+static uint8_t naptr_rdata[] = {0x00,0x0a,0x00,0x64,0x01,0x53,0x07,0x53,0x49,
+ 0x50,0x2b,0x44,0x32,0x55,0x00,0x04,0x5f,0x73,0x69,0x70,0x04,0x5f,0x75,0x64,
+ 0x70,0x07,0x65,0x78,0x61,0x6d,0x70,0x6c,0x65,0x03,0x63,0x6f,0x6d,0x00};
+
+static const char *naptr_str =
+ "10 100 \"S\" \"SIP+D2U\" \"\" _sip._udp.example.com.";
+static const char *naptr_str2 =
+ "10 100 S SIP+D2U \"\" _sip._udp.example.com.";
+
+static const char *naptr_str_small1 =
+ "9 100 \"S\" \"SIP+D2U\" \"\" _sip._udp.example.com.";
+static const char *naptr_str_small2 =
+ "10 90 \"S\" \"SIP+D2U\" \"\" _sip._udp.example.com.";
+static const char *naptr_str_small3 =
+ "10 100 \"R\" \"SIP+D2U\" \"\" _sip._udp.example.com.";
+static const char *naptr_str_small4 =
+ "10 100 \"S\" \"SIP+C2U\" \"\" _sip._udp.example.com.";
+static const char *naptr_str_small5 =
+ "10 100 \"S\" \"SIP+D2U\" \"\" _rip._udp.example.com.";
+
+static const char *naptr_str_large1 =
+ "11 100 \"S\" \"SIP+D2U\" \"\" _sip._udp.example.com.";
+static const char *naptr_str_large2 =
+ "10 110 \"S\" \"SIP+D2U\" \"\" _sip._udp.example.com.";
+static const char *naptr_str_large3 =
+ "10 100 \"T\" \"SIP+D2U\" \"\" _sip._udp.example.com.";
+static const char *naptr_str_large4 =
+ "10 100 \"S\" \"SIP+E2U\" \"\" _sip._udp.example.com.";
+static const char *naptr_str_large5 =
+ "10 100 \"S\" \"SIP+D2U\" \"\" _tip._udp.example.com.";
+
+TEST_F(Rdata_NAPTR_Test, createFromText) {
+ NAPTR naptr(naptr_str);
+ EXPECT_EQ(10, naptr.getOrder());
+ EXPECT_EQ(100, naptr.getPreference());
+ EXPECT_EQ(string("S"), naptr.getFlags());
+ EXPECT_EQ(string("SIP+D2U"), naptr.getServices());
+ EXPECT_EQ(string(""), naptr.getRegexp());
+ EXPECT_EQ(Name("_sip._udp.example.com."), naptr.getReplacement());
+
+ // Test <char-string> fields that are separated by spaces
+ NAPTR naptr2(naptr_str2);
+ EXPECT_EQ(string("S"), naptr2.getFlags());
+ EXPECT_EQ(string("SIP+D2U"), naptr2.getServices());
+}
+
+TEST_F(Rdata_NAPTR_Test, badText) {
+ // Order number cannot exceed 65535
+ EXPECT_THROW(const NAPTR naptr("65536 10 S SIP \"\" _sip._udp.example.com."),
+ InvalidRdataText);
+ // Preference number cannot exceed 65535
+ EXPECT_THROW(const NAPTR naptr("100 65536 S SIP \"\" _sip._udp.example.com."),
+ InvalidRdataText);
+ // No regexp given
+ EXPECT_THROW(const NAPTR naptr("100 10 S SIP _sip._udp.example.com."),
+ InvalidRdataText);
+ // The double-quote separators must match
+ EXPECT_THROW(const NAPTR naptr("100 10 \"S SIP \"\" _sip._udp.example.com."),
+ InvalidRdataText);
+ // Order or preference cannot be missing
+ EXPECT_THROW(const NAPTR naptr("10 \"S\" SIP \"\" _sip._udp.example.com."),
+ InvalidRdataText);
+ // Fields must be separated by spaces
+ EXPECT_THROW(const NAPTR naptr("100 10S SIP \"\" _sip._udp.example.com."),
+ InvalidRdataText);
+ EXPECT_THROW(const NAPTR naptr("100 10 \"S\"\"SIP\" \"\" _sip._udp.example.com."),
+ InvalidRdataText);
+ // Field cannot be missing
+ EXPECT_THROW(const NAPTR naptr("100 10 \"S\""), InvalidRdataText);
+
+ // The <character-string> cannot exceed 255 characters
+ string naptr_str;
+ naptr_str += "100 10 ";
+ for (int i = 0; i < 257; ++i) {
+ naptr_str += 'A';
+ }
+ naptr_str += " SIP \"\" _sip._udp.example.com.";
+ EXPECT_THROW(const NAPTR naptr(naptr_str), CharStringTooLong);
+}
+
+TEST_F(Rdata_NAPTR_Test, createFromWire) {
+ InputBuffer input_buffer(naptr_rdata, sizeof(naptr_rdata));
+ NAPTR naptr(input_buffer, sizeof(naptr_rdata));
+ EXPECT_EQ(10, naptr.getOrder());
+ EXPECT_EQ(100, naptr.getPreference());
+ EXPECT_EQ(string("S"), naptr.getFlags());
+ EXPECT_EQ(string("SIP+D2U"), naptr.getServices());
+ EXPECT_EQ(string(""), naptr.getRegexp());
+ EXPECT_EQ(Name("_sip._udp.example.com."), naptr.getReplacement());
+}
+
+TEST_F(Rdata_NAPTR_Test, toWire) {
+ NAPTR naptr(naptr_str);
+ naptr.toWire(obuffer);
+
+ EXPECT_PRED_FORMAT4(UnitTestUtil::matchWireData, obuffer.getData(),
+ obuffer.getLength(), naptr_rdata, sizeof(naptr_rdata));
+}
+
+TEST_F(Rdata_NAPTR_Test, toWireRenderer) {
+ NAPTR naptr(naptr_str);
+
+ naptr.toWire(renderer);
+ EXPECT_PRED_FORMAT4(UnitTestUtil::matchWireData, obuffer.getData(),
+ obuffer.getLength(), naptr_rdata, sizeof(naptr_rdata));
+}
+
+TEST_F(Rdata_NAPTR_Test, toText) {
+ NAPTR naptr(naptr_str);
+ EXPECT_EQ(naptr_str, naptr.toText());
+}
+
+TEST_F(Rdata_NAPTR_Test, compare) {
+ NAPTR naptr(naptr_str);
+ NAPTR naptr_small1(naptr_str_small1);
+ NAPTR naptr_small2(naptr_str_small2);
+ NAPTR naptr_small3(naptr_str_small3);
+ NAPTR naptr_small4(naptr_str_small4);
+ NAPTR naptr_small5(naptr_str_small5);
+ NAPTR naptr_large1(naptr_str_large1);
+ NAPTR naptr_large2(naptr_str_large2);
+ NAPTR naptr_large3(naptr_str_large3);
+ NAPTR naptr_large4(naptr_str_large4);
+ NAPTR naptr_large5(naptr_str_large5);
+
+ EXPECT_EQ(0, naptr.compare(NAPTR(naptr_str)));
+ EXPECT_EQ(1, naptr.compare(NAPTR(naptr_str_small1)));
+ EXPECT_EQ(1, naptr.compare(NAPTR(naptr_str_small2)));
+ EXPECT_EQ(1, naptr.compare(NAPTR(naptr_str_small3)));
+ EXPECT_EQ(1, naptr.compare(NAPTR(naptr_str_small4)));
+ EXPECT_EQ(1, naptr.compare(NAPTR(naptr_str_small5)));
+ EXPECT_EQ(-1, naptr.compare(NAPTR(naptr_str_large1)));
+ EXPECT_EQ(-1, naptr.compare(NAPTR(naptr_str_large2)));
+ EXPECT_EQ(-1, naptr.compare(NAPTR(naptr_str_large3)));
+ EXPECT_EQ(-1, naptr.compare(NAPTR(naptr_str_large4)));
+ EXPECT_EQ(-1, naptr.compare(NAPTR(naptr_str_large5)));
+}
+
+}
diff --git a/src/lib/dns/tests/rdata_txt_like_unittest.cc b/src/lib/dns/tests/rdata_txt_like_unittest.cc
index 19da492..c8f489c 100644
--- a/src/lib/dns/tests/rdata_txt_like_unittest.cc
+++ b/src/lib/dns/tests/rdata_txt_like_unittest.cc
@@ -215,4 +215,39 @@ TYPED_TEST(Rdata_TXT_LIKE_Test, assignment) {
EXPECT_EQ(0, rdata2.compare(rdata1));
}
+TYPED_TEST(Rdata_TXT_LIKE_Test, compare) {
+ string const txt1 ("aaaaaaaa");
+ string const txt2 ("aaaaaaaaaa");
+ string const txt3 ("bbbbbbbb");
+ string const txt4 (129, 'a');
+ string const txt5 (128, 'b');
+
+ EXPECT_EQ(TypeParam(txt1).compare(TypeParam(txt1)), 0);
+
+ EXPECT_LT(TypeParam("").compare(TypeParam(txt1)), 0);
+ EXPECT_GT(TypeParam(txt1).compare(TypeParam("")), 0);
+
+ EXPECT_LT(TypeParam(txt1).compare(TypeParam(txt2)), 0);
+ EXPECT_GT(TypeParam(txt2).compare(TypeParam(txt1)), 0);
+
+ EXPECT_LT(TypeParam(txt1).compare(TypeParam(txt3)), 0);
+ EXPECT_GT(TypeParam(txt3).compare(TypeParam(txt1)), 0);
+
+ // we're comparing the data raw, starting at the length octet, so a shorter
+ // string sorts before a longer one no matter the lexicographical order
+ EXPECT_LT(TypeParam(txt3).compare(TypeParam(txt2)), 0);
+ EXPECT_GT(TypeParam(txt2).compare(TypeParam(txt3)), 0);
+
+ // to make sure the length octet compares unsigned
+ EXPECT_LT(TypeParam(txt1).compare(TypeParam(txt4)), 0);
+ EXPECT_GT(TypeParam(txt4).compare(TypeParam(txt1)), 0);
+
+ EXPECT_LT(TypeParam(txt5).compare(TypeParam(txt4)), 0);
+ EXPECT_GT(TypeParam(txt4).compare(TypeParam(txt5)), 0);
+
+ // comparison attempt between incompatible RR types should be rejected
+ EXPECT_THROW(TypeParam(txt1).compare(*this->rdata_nomatch),
+ bad_cast);
+}
+
}
diff --git a/src/lib/dns/tests/testdata/Makefile.am b/src/lib/dns/tests/testdata/Makefile.am
index 743b5d2..d8f0d1c 100644
--- a/src/lib/dns/tests/testdata/Makefile.am
+++ b/src/lib/dns/tests/testdata/Makefile.am
@@ -6,7 +6,9 @@ BUILT_SOURCES += message_fromWire10.wire message_fromWire11.wire
BUILT_SOURCES += message_fromWire12.wire message_fromWire13.wire
BUILT_SOURCES += message_fromWire14.wire message_fromWire15.wire
BUILT_SOURCES += message_fromWire16.wire message_fromWire17.wire
-BUILT_SOURCES += message_fromWire18.wire
+BUILT_SOURCES += message_fromWire18.wire message_fromWire19.wire
+BUILT_SOURCES += message_fromWire20.wire message_fromWire21.wire
+BUILT_SOURCES += message_fromWire22.wire
BUILT_SOURCES += message_toWire2.wire message_toWire3.wire
BUILT_SOURCES += message_toWire4.wire message_toWire5.wire
BUILT_SOURCES += message_toText1.wire message_toText2.wire
@@ -26,10 +28,20 @@ BUILT_SOURCES += rdata_nsec3_fromWire10.wire rdata_nsec3_fromWire11.wire
BUILT_SOURCES += rdata_nsec3_fromWire12.wire rdata_nsec3_fromWire13.wire
BUILT_SOURCES += rdata_nsec3_fromWire14.wire rdata_nsec3_fromWire15.wire
BUILT_SOURCES += rdata_rrsig_fromWire2.wire
+BUILT_SOURCES += rdata_minfo_fromWire1.wire rdata_minfo_fromWire2.wire
+BUILT_SOURCES += rdata_minfo_fromWire3.wire rdata_minfo_fromWire4.wire
+BUILT_SOURCES += rdata_minfo_fromWire5.wire rdata_minfo_fromWire6.wire
+BUILT_SOURCES += rdata_minfo_toWire1.wire rdata_minfo_toWire2.wire
+BUILT_SOURCES += rdata_minfo_toWireUncompressed1.wire
+BUILT_SOURCES += rdata_minfo_toWireUncompressed2.wire
BUILT_SOURCES += rdata_rp_fromWire1.wire rdata_rp_fromWire2.wire
BUILT_SOURCES += rdata_rp_fromWire3.wire rdata_rp_fromWire4.wire
BUILT_SOURCES += rdata_rp_fromWire5.wire rdata_rp_fromWire6.wire
BUILT_SOURCES += rdata_rp_toWire1.wire rdata_rp_toWire2.wire
+BUILT_SOURCES += rdata_afsdb_fromWire1.wire rdata_afsdb_fromWire2.wire
+BUILT_SOURCES += rdata_afsdb_fromWire3.wire rdata_afsdb_fromWire4.wire
+BUILT_SOURCES += rdata_afsdb_fromWire5.wire
+BUILT_SOURCES += rdata_afsdb_toWire1.wire rdata_afsdb_toWire2.wire
BUILT_SOURCES += rdata_soa_toWireUncompressed.wire
BUILT_SOURCES += rdata_txt_fromWire2.wire rdata_txt_fromWire3.wire
BUILT_SOURCES += rdata_txt_fromWire4.wire rdata_txt_fromWire5.wire
@@ -61,6 +73,8 @@ EXTRA_DIST += message_fromWire11.spec message_fromWire12.spec
EXTRA_DIST += message_fromWire13.spec message_fromWire14.spec
EXTRA_DIST += message_fromWire15.spec message_fromWire16.spec
EXTRA_DIST += message_fromWire17.spec message_fromWire18.spec
+EXTRA_DIST += message_fromWire19.spec message_fromWire20.spec
+EXTRA_DIST += message_fromWire21.spec message_fromWire22.spec
EXTRA_DIST += message_toWire1 message_toWire2.spec message_toWire3.spec
EXTRA_DIST += message_toWire4.spec message_toWire5.spec
EXTRA_DIST += message_toText1.txt message_toText1.spec
@@ -99,8 +113,18 @@ EXTRA_DIST += rdata_rp_fromWire1.spec rdata_rp_fromWire2.spec
EXTRA_DIST += rdata_rp_fromWire3.spec rdata_rp_fromWire4.spec
EXTRA_DIST += rdata_rp_fromWire5.spec rdata_rp_fromWire6.spec
EXTRA_DIST += rdata_rp_toWire1.spec rdata_rp_toWire2.spec
+EXTRA_DIST += rdata_afsdb_fromWire1.spec rdata_afsdb_fromWire2.spec
+EXTRA_DIST += rdata_afsdb_fromWire3.spec rdata_afsdb_fromWire4.spec
+EXTRA_DIST += rdata_afsdb_fromWire5.spec
+EXTRA_DIST += rdata_afsdb_toWire1.spec rdata_afsdb_toWire2.spec
EXTRA_DIST += rdata_soa_fromWire rdata_soa_toWireUncompressed.spec
EXTRA_DIST += rdata_srv_fromWire
+EXTRA_DIST += rdata_minfo_fromWire1.spec rdata_minfo_fromWire2.spec
+EXTRA_DIST += rdata_minfo_fromWire3.spec rdata_minfo_fromWire4.spec
+EXTRA_DIST += rdata_minfo_fromWire5.spec rdata_minfo_fromWire6.spec
+EXTRA_DIST += rdata_minfo_toWire1.spec rdata_minfo_toWire2.spec
+EXTRA_DIST += rdata_minfo_toWireUncompressed1.spec
+EXTRA_DIST += rdata_minfo_toWireUncompressed2.spec
EXTRA_DIST += rdata_txt_fromWire1 rdata_txt_fromWire2.spec
EXTRA_DIST += rdata_txt_fromWire3.spec rdata_txt_fromWire4.spec
EXTRA_DIST += rdata_txt_fromWire5.spec rdata_unknown_fromWire
diff --git a/src/lib/dns/tests/testdata/message_fromWire19.spec b/src/lib/dns/tests/testdata/message_fromWire19.spec
new file mode 100644
index 0000000..8212dbf
--- /dev/null
+++ b/src/lib/dns/tests/testdata/message_fromWire19.spec
@@ -0,0 +1,20 @@
+#
+# A non realistic DNS response message containing mixed types of RRs in the
+# answer section in a mixed order.
+#
+
+[custom]
+sections: header:question:a/1:aaaa:a/2
+[header]
+qr: 1
+ancount: 3
+[question]
+name: www.example.com
+rrtype: A
+[a/1]
+as_rr: True
+[aaaa]
+as_rr: True
+[a/2]
+as_rr: True
+address: 192.0.2.2
diff --git a/src/lib/dns/tests/testdata/message_fromWire20.spec b/src/lib/dns/tests/testdata/message_fromWire20.spec
new file mode 100644
index 0000000..91986e4
--- /dev/null
+++ b/src/lib/dns/tests/testdata/message_fromWire20.spec
@@ -0,0 +1,20 @@
+#
+# A non realistic DNS response message containing mixed types of RRs in the
+# authority section in a mixed order.
+#
+
+[custom]
+sections: header:question:a/1:aaaa:a/2
+[header]
+qr: 1
+nscount: 3
+[question]
+name: www.example.com
+rrtype: A
+[a/1]
+as_rr: True
+[aaaa]
+as_rr: True
+[a/2]
+as_rr: True
+address: 192.0.2.2
diff --git a/src/lib/dns/tests/testdata/message_fromWire21.spec b/src/lib/dns/tests/testdata/message_fromWire21.spec
new file mode 100644
index 0000000..cd6aac9
--- /dev/null
+++ b/src/lib/dns/tests/testdata/message_fromWire21.spec
@@ -0,0 +1,20 @@
+#
+# A non realistic DNS response message containing mixed types of RRs in the
+# additional section in a mixed order.
+#
+
+[custom]
+sections: header:question:a/1:aaaa:a/2
+[header]
+qr: 1
+arcount: 3
+[question]
+name: www.example.com
+rrtype: A
+[a/1]
+as_rr: True
+[aaaa]
+as_rr: True
+[a/2]
+as_rr: True
+address: 192.0.2.2
diff --git a/src/lib/dns/tests/testdata/message_fromWire22.spec b/src/lib/dns/tests/testdata/message_fromWire22.spec
new file mode 100644
index 0000000..a52523b
--- /dev/null
+++ b/src/lib/dns/tests/testdata/message_fromWire22.spec
@@ -0,0 +1,14 @@
+#
+# A simple DNS message containing one SOA RR in the answer section. This is
+# intended to be trimmed to emulate a bogus message.
+#
+
+[custom]
+sections: header:question:soa
+[header]
+qr: 1
+ancount: 1
+[question]
+rrtype: SOA
+[soa]
+as_rr: True
diff --git a/src/lib/dns/tests/testdata/rdata_afsdb_fromWire1.spec b/src/lib/dns/tests/testdata/rdata_afsdb_fromWire1.spec
new file mode 100644
index 0000000..f831313
--- /dev/null
+++ b/src/lib/dns/tests/testdata/rdata_afsdb_fromWire1.spec
@@ -0,0 +1,3 @@
+[custom]
+sections: afsdb
+[afsdb]
diff --git a/src/lib/dns/tests/testdata/rdata_afsdb_fromWire2.spec b/src/lib/dns/tests/testdata/rdata_afsdb_fromWire2.spec
new file mode 100644
index 0000000..f33e768
--- /dev/null
+++ b/src/lib/dns/tests/testdata/rdata_afsdb_fromWire2.spec
@@ -0,0 +1,6 @@
+[custom]
+sections: name:afsdb
+[name]
+name: example.com
+[afsdb]
+server: afsdb.ptr=0
diff --git a/src/lib/dns/tests/testdata/rdata_afsdb_fromWire3.spec b/src/lib/dns/tests/testdata/rdata_afsdb_fromWire3.spec
new file mode 100644
index 0000000..993032f
--- /dev/null
+++ b/src/lib/dns/tests/testdata/rdata_afsdb_fromWire3.spec
@@ -0,0 +1,4 @@
+[custom]
+sections: afsdb
+[afsdb]
+rdlen: 3
diff --git a/src/lib/dns/tests/testdata/rdata_afsdb_fromWire4.spec b/src/lib/dns/tests/testdata/rdata_afsdb_fromWire4.spec
new file mode 100644
index 0000000..37abf13
--- /dev/null
+++ b/src/lib/dns/tests/testdata/rdata_afsdb_fromWire4.spec
@@ -0,0 +1,4 @@
+[custom]
+sections: afsdb
+[afsdb]
+rdlen: 80
diff --git a/src/lib/dns/tests/testdata/rdata_afsdb_fromWire5.spec b/src/lib/dns/tests/testdata/rdata_afsdb_fromWire5.spec
new file mode 100644
index 0000000..0ea79dd
--- /dev/null
+++ b/src/lib/dns/tests/testdata/rdata_afsdb_fromWire5.spec
@@ -0,0 +1,4 @@
+[custom]
+sections: afsdb
+[afsdb]
+server: "01234567890123456789012345678901234567890123456789012345678901234"
diff --git a/src/lib/dns/tests/testdata/rdata_afsdb_toWire1.spec b/src/lib/dns/tests/testdata/rdata_afsdb_toWire1.spec
new file mode 100644
index 0000000..1946458
--- /dev/null
+++ b/src/lib/dns/tests/testdata/rdata_afsdb_toWire1.spec
@@ -0,0 +1,4 @@
+[custom]
+sections: afsdb
+[afsdb]
+rdlen: -1
diff --git a/src/lib/dns/tests/testdata/rdata_afsdb_toWire2.spec b/src/lib/dns/tests/testdata/rdata_afsdb_toWire2.spec
new file mode 100644
index 0000000..c80011a
--- /dev/null
+++ b/src/lib/dns/tests/testdata/rdata_afsdb_toWire2.spec
@@ -0,0 +1,8 @@
+[custom]
+sections: name:afsdb
+[name]
+name: example.com.
+[afsdb]
+subtype: 0
+server: root.example.com
+rdlen: -1
diff --git a/src/lib/dns/tests/testdata/rdata_minfo_fromWire1.spec b/src/lib/dns/tests/testdata/rdata_minfo_fromWire1.spec
new file mode 100644
index 0000000..2c43db0
--- /dev/null
+++ b/src/lib/dns/tests/testdata/rdata_minfo_fromWire1.spec
@@ -0,0 +1,3 @@
+[custom]
+sections: minfo
+[minfo]
diff --git a/src/lib/dns/tests/testdata/rdata_minfo_fromWire2.spec b/src/lib/dns/tests/testdata/rdata_minfo_fromWire2.spec
new file mode 100644
index 0000000..d781cac
--- /dev/null
+++ b/src/lib/dns/tests/testdata/rdata_minfo_fromWire2.spec
@@ -0,0 +1,7 @@
+[custom]
+sections: name:minfo
+[name]
+name: a.example.com.
+[minfo]
+rmailbox: rmailbox.ptr=02
+emailbox: emailbox.ptr=02
diff --git a/src/lib/dns/tests/testdata/rdata_minfo_fromWire3.spec b/src/lib/dns/tests/testdata/rdata_minfo_fromWire3.spec
new file mode 100644
index 0000000..a1d4b76
--- /dev/null
+++ b/src/lib/dns/tests/testdata/rdata_minfo_fromWire3.spec
@@ -0,0 +1,6 @@
+[custom]
+sections: minfo
+# rdlength too short
+[minfo]
+emailbox: emailbox.ptr=11
+rdlen: 3
diff --git a/src/lib/dns/tests/testdata/rdata_minfo_fromWire4.spec b/src/lib/dns/tests/testdata/rdata_minfo_fromWire4.spec
new file mode 100644
index 0000000..269a6ce
--- /dev/null
+++ b/src/lib/dns/tests/testdata/rdata_minfo_fromWire4.spec
@@ -0,0 +1,6 @@
+[custom]
+sections: minfo
+# rdlength too long
+[minfo]
+emailbox: emailbox.ptr=11
+rdlen: 80
diff --git a/src/lib/dns/tests/testdata/rdata_minfo_fromWire5.spec b/src/lib/dns/tests/testdata/rdata_minfo_fromWire5.spec
new file mode 100644
index 0000000..3a888e3
--- /dev/null
+++ b/src/lib/dns/tests/testdata/rdata_minfo_fromWire5.spec
@@ -0,0 +1,5 @@
+[custom]
+sections: minfo
+# bogus rmailbox name
+[minfo]
+rmailbox: "01234567890123456789012345678901234567890123456789012345678901234"
diff --git a/src/lib/dns/tests/testdata/rdata_minfo_fromWire6.spec b/src/lib/dns/tests/testdata/rdata_minfo_fromWire6.spec
new file mode 100644
index 0000000..c75ed8e
--- /dev/null
+++ b/src/lib/dns/tests/testdata/rdata_minfo_fromWire6.spec
@@ -0,0 +1,5 @@
+[custom]
+sections: minfo
+# bogus emailbox name
+[minfo]
+emailbox: "01234567890123456789012345678901234567890123456789012345678901234"
diff --git a/src/lib/dns/tests/testdata/rdata_minfo_toWire1.spec b/src/lib/dns/tests/testdata/rdata_minfo_toWire1.spec
new file mode 100644
index 0000000..7b340a3
--- /dev/null
+++ b/src/lib/dns/tests/testdata/rdata_minfo_toWire1.spec
@@ -0,0 +1,5 @@
+[custom]
+sections: minfo
+[minfo]
+emailbox: emailbox.ptr=09
+rdlen: -1
diff --git a/src/lib/dns/tests/testdata/rdata_minfo_toWire2.spec b/src/lib/dns/tests/testdata/rdata_minfo_toWire2.spec
new file mode 100644
index 0000000..132f118
--- /dev/null
+++ b/src/lib/dns/tests/testdata/rdata_minfo_toWire2.spec
@@ -0,0 +1,6 @@
+[custom]
+sections: minfo
+[minfo]
+rmailbox: root.example.com.
+emailbox: emailbox.ptr=05
+rdlen: -1
diff --git a/src/lib/dns/tests/testdata/rdata_minfo_toWireUncompressed1.spec b/src/lib/dns/tests/testdata/rdata_minfo_toWireUncompressed1.spec
new file mode 100644
index 0000000..d99a381
--- /dev/null
+++ b/src/lib/dns/tests/testdata/rdata_minfo_toWireUncompressed1.spec
@@ -0,0 +1,7 @@
+#
+# The simplest form of MINFO: all default parameters
+#
+[custom]
+sections: minfo
+[minfo]
+rdlen: -1
diff --git a/src/lib/dns/tests/testdata/rdata_minfo_toWireUncompressed2.spec b/src/lib/dns/tests/testdata/rdata_minfo_toWireUncompressed2.spec
new file mode 100644
index 0000000..0f78fcc
--- /dev/null
+++ b/src/lib/dns/tests/testdata/rdata_minfo_toWireUncompressed2.spec
@@ -0,0 +1,8 @@
+#
+# A form of MINFO with a custom rmailbox and a default emailbox
+#
+[custom]
+sections: minfo
+[minfo]
+rmailbox: root.example.com.
+rdlen: -1
diff --git a/src/lib/exceptions/exceptions.h b/src/lib/exceptions/exceptions.h
index d0f1d74..433bb7d 100644
--- a/src/lib/exceptions/exceptions.h
+++ b/src/lib/exceptions/exceptions.h
@@ -137,6 +137,18 @@ public:
};
///
+/// \brief A generic exception that is thrown when a function is
+/// not implemented.
+///
+/// This may be due to unfinished implementation or in case the
+/// function isn't even planned to be provided for that situation.
+class NotImplemented : public Exception {
+public:
+ NotImplemented(const char* file, size_t line, const char* what) :
+ isc::Exception(file, line, what) {}
+};
+
+///
/// A shortcut macro to insert known values into exception arguments.
///
/// It allows the \c stream argument to be part of a statement using an
diff --git a/src/lib/python/isc/Makefile.am b/src/lib/python/isc/Makefile.am
index d94100b..a3e74c5 100644
--- a/src/lib/python/isc/Makefile.am
+++ b/src/lib/python/isc/Makefile.am
@@ -1,4 +1,5 @@
-SUBDIRS = datasrc cc config log net notify util testutils acl bind10
+SUBDIRS = datasrc cc config dns log net notify util testutils acl bind10
+SUBDIRS += xfrin log_messages
python_PYTHON = __init__.py
diff --git a/src/lib/python/isc/__init__.py b/src/lib/python/isc/__init__.py
index 8fcbf42..029f110 100644
--- a/src/lib/python/isc/__init__.py
+++ b/src/lib/python/isc/__init__.py
@@ -1,4 +1,7 @@
-import isc.datasrc
+# On some systems, it appears the dynamic linker gets
+# confused if the order is not right here
+# There is probably a solution for this, but for now:
+# order is important here!
import isc.cc
import isc.config
-#import isc.dns
+import isc.datasrc
diff --git a/src/lib/python/isc/acl/tests/Makefile.am b/src/lib/python/isc/acl/tests/Makefile.am
index 87781d7..e0a1895 100644
--- a/src/lib/python/isc/acl/tests/Makefile.am
+++ b/src/lib/python/isc/acl/tests/Makefile.am
@@ -7,7 +7,7 @@ EXTRA_DIST = $(PYTESTS)
# required by loadable python modules.
LIBRARY_PATH_PLACEHOLDER =
if SET_ENV_LIBRARY_PATH
-LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/acl/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$$$(ENV_LIBRARY_PATH)
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/acl/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
endif
# test using command-line arguments, so use check-local target instead of TESTS
@@ -19,7 +19,7 @@ if ENABLE_PYTHON_COVERAGE
endif
for pytest in $(PYTESTS) ; do \
echo Running test: $$pytest ; \
- env PYTHONPATH=$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/isc/python/acl/.libs:$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python \
+ PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/isc/python/acl/.libs \
$(LIBRARY_PATH_PLACEHOLDER) \
$(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
done
diff --git a/src/lib/python/isc/bind10/sockcreator.py b/src/lib/python/isc/bind10/sockcreator.py
index 9fcc74e..8e5b019 100644
--- a/src/lib/python/isc/bind10/sockcreator.py
+++ b/src/lib/python/isc/bind10/sockcreator.py
@@ -17,7 +17,7 @@ import socket
import struct
import os
import subprocess
-from bind10_messages import *
+from isc.log_messages.bind10_messages import *
from libutil_io_python import recv_fd
logger = isc.log.Logger("boss")
diff --git a/src/lib/python/isc/bind10/tests/Makefile.am b/src/lib/python/isc/bind10/tests/Makefile.am
index f498b86..df8ab30 100644
--- a/src/lib/python/isc/bind10/tests/Makefile.am
+++ b/src/lib/python/isc/bind10/tests/Makefile.am
@@ -9,7 +9,7 @@ EXTRA_DIST = $(PYTESTS)
# required by loadable python modules.
LIBRARY_PATH_PLACEHOLDER =
if SET_ENV_LIBRARY_PATH
-LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/util/io/.libs:$$$(ENV_LIBRARY_PATH)
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/util/io/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
endif
# test using command-line arguments, so use check-local target instead of TESTS
@@ -22,7 +22,7 @@ endif
for pytest in $(PYTESTS) ; do \
echo Running test: $$pytest ; \
$(LIBRARY_PATH_PLACEHOLDER) \
- env PYTHONPATH=$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python:$(abs_top_srcdir)/src/bin:$(abs_top_builddir)/src/bin/bind10:$(abs_top_builddir)/src/lib/util/io/.libs \
+ PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_srcdir)/src/bin:$(abs_top_builddir)/src/bin/bind10:$(abs_top_builddir)/src/lib/util/io/.libs \
BIND10_MSGQ_SOCKET_FILE=$(abs_top_builddir)/msgq_socket \
$(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
done
diff --git a/src/lib/python/isc/cc/tests/Makefile.am b/src/lib/python/isc/cc/tests/Makefile.am
index 4e49501..4c2acc0 100644
--- a/src/lib/python/isc/cc/tests/Makefile.am
+++ b/src/lib/python/isc/cc/tests/Makefile.am
@@ -10,7 +10,7 @@ EXTRA_DIST += test_session.py
# required by loadable python modules.
LIBRARY_PATH_PLACEHOLDER =
if SET_ENV_LIBRARY_PATH
-LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$$$(ENV_LIBRARY_PATH)
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
endif
# test using command-line arguments, so use check-local target instead of TESTS
@@ -23,7 +23,7 @@ endif
for pytest in $(PYTESTS) ; do \
echo Running test: $$pytest ; \
$(LIBRARY_PATH_PLACEHOLDER) \
- env PYTHONPATH=$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python \
+ PYTHONPATH=$(COMMON_PYTHON_PATH) \
BIND10_TEST_SOCKET_FILE=$(builddir)/test_socket.sock \
$(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
done
diff --git a/src/lib/python/isc/config/Makefile.am b/src/lib/python/isc/config/Makefile.am
index 312ad33..ef696fb 100644
--- a/src/lib/python/isc/config/Makefile.am
+++ b/src/lib/python/isc/config/Makefile.am
@@ -1,27 +1,31 @@
SUBDIRS = . tests
python_PYTHON = __init__.py ccsession.py cfgmgr.py config_data.py module_spec.py
-pyexec_DATA = cfgmgr_messages.py $(top_builddir)/src/lib/python/config_messages.py
-
pythondir = $(pyexecdir)/isc/config
-# Define rule to build logging source files from message file
-cfgmgr_messages.py: cfgmgr_messages.mes
- $(top_builddir)/src/lib/log/compiler/message \
- -p $(top_srcdir)/src/lib/python/isc/config/cfgmgr_messages.mes
-
-$(top_builddir)/src/lib/python/config_messages.py: config_messages.mes
- $(top_builddir)/src/lib/log/compiler/message \
- -p -d $(top_builddir)/src/lib/python \
- $(top_srcdir)/src/lib/python/isc/config/config_messages.mes
+BUILT_SOURCES = $(PYTHON_LOGMSGPKG_DIR)/work/cfgmgr_messages.py
+BUILT_SOURCES += $(PYTHON_LOGMSGPKG_DIR)/work/config_messages.py
+nodist_pylogmessage_PYTHON = $(PYTHON_LOGMSGPKG_DIR)/work/cfgmgr_messages.py
+nodist_pylogmessage_PYTHON += $(PYTHON_LOGMSGPKG_DIR)/work/config_messages.py
+pylogmessagedir = $(pyexecdir)/isc/log_messages/
-CLEANFILES = cfgmgr_messages.py cfgmgr_messages.pyc
-CLEANFILES += $(top_builddir)/src/lib/python/config_messages.py
-CLEANFILES += $(top_builddir)/src/lib/python/config_messages.pyc
+CLEANFILES = $(PYTHON_LOGMSGPKG_DIR)/work/cfgmgr_messages.py
+CLEANFILES += $(PYTHON_LOGMSGPKG_DIR)/work/cfgmgr_messages.pyc
+CLEANFILES += $(PYTHON_LOGMSGPKG_DIR)/work/config_messages.py
+CLEANFILES += $(PYTHON_LOGMSGPKG_DIR)/work/config_messages.pyc
CLEANDIRS = __pycache__
EXTRA_DIST = cfgmgr_messages.mes config_messages.mes
+# Define rule to build logging source files from message file
+$(PYTHON_LOGMSGPKG_DIR)/work/cfgmgr_messages.py : cfgmgr_messages.mes
+ $(top_builddir)/src/lib/log/compiler/message \
+ -d $(PYTHON_LOGMSGPKG_DIR)/work -p $(srcdir)/cfgmgr_messages.mes
+
+$(PYTHON_LOGMSGPKG_DIR)/work/config_messages.py : config_messages.mes
+ $(top_builddir)/src/lib/log/compiler/message \
+ -d $(PYTHON_LOGMSGPKG_DIR)/work -p $(srcdir)/config_messages.mes
+
clean-local:
rm -rf $(CLEANDIRS)
diff --git a/src/lib/python/isc/config/ccsession.py b/src/lib/python/isc/config/ccsession.py
index ba7724c..d07df1e 100644
--- a/src/lib/python/isc/config/ccsession.py
+++ b/src/lib/python/isc/config/ccsession.py
@@ -43,7 +43,7 @@ from isc.util.file import path_search
import bind10_config
from isc.log import log_config_update
import json
-from config_messages import *
+from isc.log_messages.config_messages import *
logger = isc.log.Logger("config")
diff --git a/src/lib/python/isc/config/cfgmgr.py b/src/lib/python/isc/config/cfgmgr.py
index 1db9fd3..9996a19 100644
--- a/src/lib/python/isc/config/cfgmgr.py
+++ b/src/lib/python/isc/config/cfgmgr.py
@@ -32,7 +32,7 @@ from isc.config import ccsession, config_data, module_spec
from isc.util.file import path_search
import bind10_config
import isc.log
-from cfgmgr_messages import *
+from isc.log_messages.cfgmgr_messages import *
logger = isc.log.Logger("cfgmgr")
diff --git a/src/lib/python/isc/config/tests/Makefile.am b/src/lib/python/isc/config/tests/Makefile.am
index 47ccc41..6670ee7 100644
--- a/src/lib/python/isc/config/tests/Makefile.am
+++ b/src/lib/python/isc/config/tests/Makefile.am
@@ -8,7 +8,7 @@ EXTRA_DIST += unittest_fakesession.py
# required by loadable python modules.
LIBRARY_PATH_PLACEHOLDER =
if SET_ENV_LIBRARY_PATH
-LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$$$(ENV_LIBRARY_PATH)
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
endif
# test using command-line arguments, so use check-local target instead of TESTS
@@ -21,7 +21,7 @@ endif
for pytest in $(PYTESTS) ; do \
echo Running test: $$pytest ; \
$(LIBRARY_PATH_PLACEHOLDER) \
- env PYTHONPATH=$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python:$(abs_top_builddir)/src/lib/python/isc/config \
+ PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_builddir)/src/lib/python/isc/config \
B10_TEST_PLUGIN_DIR=$(abs_top_srcdir)/src/bin/cfgmgr/plugins \
CONFIG_TESTDATA_PATH=$(abs_top_srcdir)/src/lib/config/tests/testdata \
CONFIG_WR_TESTDATA_PATH=$(abs_top_builddir)/src/lib/config/tests/testdata \
diff --git a/src/lib/python/isc/datasrc/Makefile.am b/src/lib/python/isc/datasrc/Makefile.am
index 46fb661..07fb417 100644
--- a/src/lib/python/isc/datasrc/Makefile.am
+++ b/src/lib/python/isc/datasrc/Makefile.am
@@ -1,10 +1,44 @@
SUBDIRS = . tests
+# old data, should be removed in the near future once conversion is done
+pythondir = $(pyexecdir)/isc/datasrc
python_PYTHON = __init__.py master.py sqlite3_ds.py
-pythondir = $(pyexecdir)/isc/datasrc
+
+# new data
+
+AM_CPPFLAGS = -I$(top_srcdir)/src/lib -I$(top_builddir)/src/lib
+AM_CPPFLAGS += $(SQLITE_CFLAGS)
+
+python_LTLIBRARIES = datasrc.la
+datasrc_la_SOURCES = datasrc.cc datasrc.h
+datasrc_la_SOURCES += client_python.cc client_python.h
+datasrc_la_SOURCES += iterator_python.cc iterator_python.h
+datasrc_la_SOURCES += finder_python.cc finder_python.h
+datasrc_la_SOURCES += updater_python.cc updater_python.h
+# This is a temporary workaround for #1206, where the InMemoryClient has been
+# moved to an ldopened library. We could add that library to LDADD, but that
+# is nonportable. When #1207 is done this becomes moot anyway, and the
+# specific workaround is not needed anymore, so we can then remove this
+# line again.
+datasrc_la_SOURCES += ${top_srcdir}/src/lib/datasrc/sqlite3_accessor.cc
+
+datasrc_la_CPPFLAGS = $(AM_CPPFLAGS) $(PYTHON_INCLUDES)
+datasrc_la_CXXFLAGS = $(AM_CXXFLAGS) $(PYTHON_CXXFLAGS)
+datasrc_la_LDFLAGS = $(PYTHON_LDFLAGS)
+datasrc_la_LDFLAGS += -module
+datasrc_la_LIBADD = $(top_builddir)/src/lib/datasrc/libdatasrc.la
+datasrc_la_LIBADD += $(top_builddir)/src/lib/dns/python/libpydnspp.la
+datasrc_la_LIBADD += $(PYTHON_LIB)
+#datasrc_la_LIBADD += $(SQLITE_LIBS)
+
+EXTRA_DIST = client_inc.cc
+EXTRA_DIST += finder_inc.cc
+EXTRA_DIST += iterator_inc.cc
+EXTRA_DIST += updater_inc.cc
CLEANDIRS = __pycache__
clean-local:
rm -rf $(CLEANDIRS)
+
diff --git a/src/lib/python/isc/datasrc/__init__.py b/src/lib/python/isc/datasrc/__init__.py
index 0e1e481..0b4ed98 100644
--- a/src/lib/python/isc/datasrc/__init__.py
+++ b/src/lib/python/isc/datasrc/__init__.py
@@ -1,2 +1,21 @@
-from isc.datasrc.master import *
+import sys
+import os
+
+# this setup is a temporary workaround to deal with the problem of
+# having both 'normal' python modules and a wrapper module
+# Once all programs use the new interface, we should remove the
+# old, and the setup can be made similar to that of the log wrappers.
+intree = False
+for base in sys.path[:]:
+ datasrc_libdir = os.path.join(base, 'isc/datasrc/.libs')
+ if os.path.exists(datasrc_libdir):
+ sys.path.insert(0, datasrc_libdir)
+ intree = True
+
+if intree:
+ from datasrc import *
+else:
+ from isc.datasrc.datasrc import *
from isc.datasrc.sqlite3_ds import *
+from isc.datasrc.master import *
+
diff --git a/src/lib/python/isc/datasrc/client_inc.cc b/src/lib/python/isc/datasrc/client_inc.cc
new file mode 100644
index 0000000..1eba488
--- /dev/null
+++ b/src/lib/python/isc/datasrc/client_inc.cc
@@ -0,0 +1,157 @@
+namespace {
+
+const char* const DataSourceClient_doc = "\
+The base class of data source clients.\n\
+\n\
+This is the python wrapper for the abstract base class that defines\n\
+the common interface for various types of data source clients. A data\n\
+source client is a top level access point to a data source, allowing \n\
+various operations on the data source such as lookups, traversing or \n\
+updates. The client class itself has limited focus and delegates \n\
+the responsibility for these specific operations to other (c++) classes;\n\
+in general methods of this class act as factories of these other classes.\n\
+\n\
+- InMemoryClient: A client of a conceptual data source that stores all\n\
+ necessary data in memory for faster lookups\n\
+- DatabaseClient: A client that uses a real database backend (such as\n\
+ an SQL database). It would internally hold a connection to the\n\
+ underlying database system.\n\
+\n\
+It is intentional that the names of these derived classes don't\n\
+contain \"DataSource\", unlike their base class. It's also noteworthy\n\
+that the naming of the base class is somewhat redundant because the\n\
+namespace datasrc would indicate that it's related to a data source.\n\
+The redundant naming comes from the observation that namespaces are\n\
+often omitted with using directives, in which case \"Client\" would be\n\
+too generic. On the other hand, concrete derived classes are generally\n\
+not expected to be referenced directly from other modules and\n\
+applications, so we'll give them more concise names such as\n\
+InMemoryClient. A single DataSourceClient object is expected to handle\n\
+only a single RR class even if the underlying data source contains\n\
+records for multiple RR classes. Likewise, (when we support views) a\n\
+DataSourceClient object is expected to handle only a single view.\n\
+\n\
+If the application uses multiple threads, each thread will need to\n\
+create and use a separate DataSourceClient. This is because some\n\
+database backend doesn't allow multiple threads to share the same\n\
+connection to the database.\n\
+\n\
+For a client using an in memory backend, this may result in having a\n\
+multiple copies of the same data in memory, increasing the memory\n\
+footprint substantially. Depending on how to support multiple CPU\n\
+cores for concurrent lookups on the same single data source (which is\n\
+not fully fixed yet, and for which multiple threads may be used), this\n\
+design may have to be revisited. This class (and therefore its derived\n\
+classes) are not copyable. This is because the derived classes would\n\
+generally contain attributes that are not easy to copy (such as a\n\
+large size of in memory data or a network connection to a database\n\
+server). In order to avoid a surprising disruption with a naive copy\n\
+it's prohibited explicitly. For the expected usage of the client\n\
+classes the restriction should be acceptable.\n\
+\n\
+Todo: This class is still not complete. It will need more factory\n\
+methods, e.g. for (re)loading a zone.\n\
+";
+
+const char* const DataSourceClient_findZone_doc = "\
+find_zone(name) -> (code, ZoneFinder)\n\
+\n\
+Returns a ZoneFinder for a zone that best matches the given name.\n\
+\n\
+code: The result code of the operation (integer).\n\
+- DataSourceClient.SUCCESS: A zone that gives an exact match is found\n\
+- DataSourceClient.PARTIALMATCH: A zone whose origin is a super domain of name\n\
+ is found (but there is no exact match)\n\
+- DataSourceClient.NOTFOUND: For all other cases.\n\
+ZoneFinder: ZoneFinder object for the found zone if one is found;\n\
+otherwise None.\n\
+\n\
+Any internal error will be raised as an isc.datasrc.Error exception\n\
+\n\
+Parameters:\n\
+ name A domain name for which the search is performed.\n\
+\n\
+Return Value(s): A tuple containing a result value and a ZoneFinder object or\n\
+None\n\
+";
+
+const char* const DataSourceClient_getIterator_doc = "\
+get_iterator(name) -> ZoneIterator\n\
+\n\
+Returns an iterator to the given zone.\n\
+\n\
+This allows for traversing the whole zone. The returned object can\n\
+provide the RRsets one by one.\n\
+\n\
+This throws isc.datasrc.Error when the zone does not exist in the\n\
+datasource, or when an internal error occurs.\n\
+\n\
+The default implementation throws isc.datasrc.NotImplemented. This allows for\n\
+easy and fast deployment of minimal custom data sources, where the\n\
+user/implementor doesn't have to care about anything else but the\n\
+actual queries. Also, in some cases, it isn't possible to traverse the\n\
+zone from logic point of view (eg. dynamically generated zone data).\n\
+\n\
+It is not fixed if a concrete implementation of this method can throw\n\
+anything else.\n\
+\n\
+Parameters:\n\
+ isc.dns.Name The name of zone apex to be traversed. It doesn't do\n\
+ nearest match as find_zone.\n\
+\n\
+Return Value(s): Pointer to the iterator.\n\
+";
+
+const char* const DataSourceClient_getUpdater_doc = "\
+get_updater(name, replace) -> ZoneUpdater\n\
+\n\
+Return an updater to make updates to a specific zone.\n\
+\n\
+The RR class of the zone is the one that the client is expected to\n\
+handle (see the detailed description of this class).\n\
+\n\
+If the specified zone is not found via the client, a NULL pointer will\n\
+be returned; in other words a completely new zone cannot be created\n\
+using an updater. It must be created beforehand (even if it's an empty\n\
+placeholder) in a way specific to the underlying data source.\n\
+\n\
+Conceptually, the updater will trigger a separate transaction for\n\
+subsequent updates to the zone within the context of the updater (the\n\
+actual implementation of the \"transaction\" may vary for the specific\n\
+underlying data source). Until commit() is performed on the updater,\n\
+the intermediate updates won't affect the results of other methods\n\
+(and the result of the object's methods created by other factory\n\
+methods). Likewise, if the updater is destructed without performing\n\
+commit(), the intermediate updates will be effectively canceled and\n\
+will never affect other methods.\n\
+\n\
+If the underlying data source allows concurrent updates, this method\n\
+can be called multiple times while the previously returned updater(s)\n\
+are still active. In this case each updater triggers a different\n\
+\"transaction\". Normally it would be for different zones for such a\n\
+case as handling multiple incoming AXFR streams concurrently, but this\n\
+interface does not even prohibit an attempt of getting more than one\n\
+updater for the same zone, as long as the underlying data source\n\
+allows such an operation (and any conflict resolution is left to the\n\
+specific implementation).\n\
+\n\
+If replace is true, any existing RRs of the zone will be deleted on\n\
+successful completion of updates (after commit() on the updater); if\n\
+it's false, the existing RRs will be intact unless explicitly deleted\n\
+by delete_rrset() on the updater.\n\
+\n\
+A data source can be \"read only\" or can prohibit partial updates. In\n\
+such cases this method will result in an isc.datasrc.NotImplemented exception\n\
+(unconditionally or when replace is false).\n\
+\n\
+Exceptions:\n\
+ isc.datasrc.NotImplemented The underlying data source does not support\n\
+ updates.\n\
+ isc.datasrc.Error Internal error in the underlying data source.\n\
+\n\
+Parameters:\n\
+ name The zone name to be updated\n\
+ replace Whether to delete existing RRs before making updates\n\
+\n\
+";
+} // unnamed namespace
diff --git a/src/lib/python/isc/datasrc/client_python.cc b/src/lib/python/isc/datasrc/client_python.cc
new file mode 100644
index 0000000..984eabf
--- /dev/null
+++ b/src/lib/python/isc/datasrc/client_python.cc
@@ -0,0 +1,264 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+// Enable this if you use s# variants with PyArg_ParseTuple(), see
+// http://docs.python.org/py3k/c-api/arg.html#strings-and-buffers
+//#define PY_SSIZE_T_CLEAN
+
+// Python.h needs to be placed at the head of the program file, see:
+// http://docs.python.org/py3k/extending/extending.html#a-simple-example
+#include <Python.h>
+
+#include <util/python/pycppwrapper_util.h>
+
+#include <datasrc/client.h>
+#include <datasrc/database.h>
+#include <datasrc/data_source.h>
+#include <datasrc/sqlite3_accessor.h>
+#include <datasrc/iterator.h>
+
+#include <dns/python/name_python.h>
+#include <dns/python/rrset_python.h>
+#include <dns/python/pydnspp_common.h>
+
+#include "datasrc.h"
+#include "client_python.h"
+#include "finder_python.h"
+#include "iterator_python.h"
+#include "updater_python.h"
+#include "client_inc.cc"
+
+using namespace std;
+using namespace isc::util::python;
+using namespace isc::dns::python;
+using namespace isc::datasrc;
+using namespace isc::datasrc::python;
+
+namespace {
+// The s_* Class simply covers one instantiation of the object
+class s_DataSourceClient : public PyObject {
+public:
+ s_DataSourceClient() : cppobj(NULL) {};
+ DataSourceClient* cppobj;
+};
+
+// Shortcut type which would be convenient for adding class variables safely.
+typedef CPPPyObjectContainer<s_DataSourceClient, DataSourceClient>
+ DataSourceClientContainer;
+
+PyObject*
+DataSourceClient_findZone(PyObject* po_self, PyObject* args) {
+ s_DataSourceClient* const self = static_cast<s_DataSourceClient*>(po_self);
+ PyObject *name;
+ if (PyArg_ParseTuple(args, "O!", &name_type, &name)) {
+ try {
+ DataSourceClient::FindResult find_result(
+ self->cppobj->findZone(PyName_ToName(name)));
+
+ result::Result r = find_result.code;
+ ZoneFinderPtr zfp = find_result.zone_finder;
+ // Use N instead of O so refcount isn't increased twice
+ return (Py_BuildValue("IN", r, createZoneFinderObject(zfp)));
+ } catch (const std::exception& exc) {
+ PyErr_SetString(getDataSourceException("Error"), exc.what());
+ return (NULL);
+ } catch (...) {
+ PyErr_SetString(getDataSourceException("Error"),
+ "Unexpected exception");
+ return (NULL);
+ }
+ } else {
+ return (NULL);
+ }
+}
+
+PyObject*
+DataSourceClient_getIterator(PyObject* po_self, PyObject* args) {
+ s_DataSourceClient* const self = static_cast<s_DataSourceClient*>(po_self);
+ PyObject *name_obj;
+ if (PyArg_ParseTuple(args, "O!", &name_type, &name_obj)) {
+ try {
+ return (createZoneIteratorObject(
+ self->cppobj->getIterator(PyName_ToName(name_obj))));
+ } catch (const isc::NotImplemented& ne) {
+ PyErr_SetString(getDataSourceException("NotImplemented"),
+ ne.what());
+ return (NULL);
+ } catch (const DataSourceError& dse) {
+ PyErr_SetString(getDataSourceException("Error"), dse.what());
+ return (NULL);
+ } catch (const std::exception& exc) {
+ PyErr_SetString(getDataSourceException("Error"), exc.what());
+ return (NULL);
+ } catch (...) {
+ PyErr_SetString(getDataSourceException("Error"),
+ "Unexpected exception");
+ return (NULL);
+ }
+ } else {
+ return (NULL);
+ }
+}
+
+PyObject*
+DataSourceClient_getUpdater(PyObject* po_self, PyObject* args) {
+ s_DataSourceClient* const self = static_cast<s_DataSourceClient*>(po_self);
+ PyObject *name_obj;
+ PyObject *replace_obj;
+ if (PyArg_ParseTuple(args, "O!O", &name_type, &name_obj, &replace_obj) &&
+ PyBool_Check(replace_obj)) {
+ bool replace = (replace_obj != Py_False);
+ try {
+ return (createZoneUpdaterObject(
+ self->cppobj->getUpdater(PyName_ToName(name_obj),
+ replace)));
+ } catch (const isc::NotImplemented& ne) {
+ PyErr_SetString(getDataSourceException("NotImplemented"),
+ ne.what());
+ return (NULL);
+ } catch (const DataSourceError& dse) {
+ PyErr_SetString(getDataSourceException("Error"), dse.what());
+ return (NULL);
+ } catch (const std::exception& exc) {
+ PyErr_SetString(getDataSourceException("Error"), exc.what());
+ return (NULL);
+ } catch (...) {
+ PyErr_SetString(getDataSourceException("Error"),
+ "Unexpected exception");
+ return (NULL);
+ }
+ } else {
+ return (NULL);
+ }
+}
+
+// This list contains the actual set of functions we have in
+// python. Each entry has
+// 1. Python method name
+// 2. Our static function here
+// 3. Argument type
+// 4. Documentation
+PyMethodDef DataSourceClient_methods[] = {
+ { "find_zone", reinterpret_cast<PyCFunction>(DataSourceClient_findZone),
+ METH_VARARGS, DataSourceClient_findZone_doc },
+ { "get_iterator",
+ reinterpret_cast<PyCFunction>(DataSourceClient_getIterator), METH_VARARGS,
+ DataSourceClient_getIterator_doc },
+ { "get_updater", reinterpret_cast<PyCFunction>(DataSourceClient_getUpdater),
+ METH_VARARGS, DataSourceClient_getUpdater_doc },
+ { NULL, NULL, 0, NULL }
+};
+
+int
+DataSourceClient_init(s_DataSourceClient* self, PyObject* args) {
+ // TODO: we should use the factory function which hasn't been written
+ // yet. For now we hardcode the sqlite3 initialization, and pass it one
+ // string for the database file. (similar to how the 'old direct'
+ // sqlite3_ds code works)
+ try {
+ char* db_file_name;
+ if (PyArg_ParseTuple(args, "s", &db_file_name)) {
+ boost::shared_ptr<DatabaseAccessor> sqlite3_accessor(
+ new SQLite3Accessor(db_file_name, isc::dns::RRClass::IN()));
+ self->cppobj = new DatabaseClient(isc::dns::RRClass::IN(),
+ sqlite3_accessor);
+ return (0);
+ } else {
+ return (-1);
+ }
+
+ } catch (const exception& ex) {
+ const string ex_what = "Failed to construct DataSourceClient object: " +
+ string(ex.what());
+ PyErr_SetString(getDataSourceException("Error"), ex_what.c_str());
+ return (-1);
+ } catch (...) {
+ PyErr_SetString(PyExc_RuntimeError,
+ "Unexpected exception in constructing DataSourceClient");
+ return (-1);
+ }
+ PyErr_SetString(PyExc_TypeError,
+ "Invalid arguments to DataSourceClient constructor");
+
+ return (-1);
+}
+
+void
+DataSourceClient_destroy(s_DataSourceClient* const self) {
+ delete self->cppobj;
+ self->cppobj = NULL;
+ Py_TYPE(self)->tp_free(self);
+}
+
+} // end anonymous namespace
+
+namespace isc {
+namespace datasrc {
+namespace python {
+// This defines the complete type for reflection in python and
+// parsing of PyObject* to s_DataSourceClient
+// Most of the functions are not actually implemented and NULL here.
+PyTypeObject datasourceclient_type = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ "datasrc.DataSourceClient",
+ sizeof(s_DataSourceClient), // tp_basicsize
+ 0, // tp_itemsize
+ reinterpret_cast<destructor>(DataSourceClient_destroy),// tp_dealloc
+ NULL, // tp_print
+ NULL, // tp_getattr
+ NULL, // tp_setattr
+ NULL, // tp_reserved
+ NULL, // tp_repr
+ NULL, // tp_as_number
+ NULL, // tp_as_sequence
+ NULL, // tp_as_mapping
+ NULL, // tp_hash
+ NULL, // tp_call
+ NULL, // tp_str
+ NULL, // tp_getattro
+ NULL, // tp_setattro
+ NULL, // tp_as_buffer
+ Py_TPFLAGS_DEFAULT, // tp_flags
+ DataSourceClient_doc,
+ NULL, // tp_traverse
+ NULL, // tp_clear
+ NULL, // tp_richcompare
+ 0, // tp_weaklistoffset
+ NULL, // tp_iter
+ NULL, // tp_iternext
+ DataSourceClient_methods, // tp_methods
+ NULL, // tp_members
+ NULL, // tp_getset
+ NULL, // tp_base
+ NULL, // tp_dict
+ NULL, // tp_descr_get
+ NULL, // tp_descr_set
+ 0, // tp_dictoffset
+ reinterpret_cast<initproc>(DataSourceClient_init),// tp_init
+ NULL, // tp_alloc
+ PyType_GenericNew, // tp_new
+ NULL, // tp_free
+ NULL, // tp_is_gc
+ NULL, // tp_bases
+ NULL, // tp_mro
+ NULL, // tp_cache
+ NULL, // tp_subclasses
+ NULL, // tp_weaklist
+ NULL, // tp_del
+ 0 // tp_version_tag
+};
+
+} // namespace python
+} // namespace datasrc
+} // namespace isc
diff --git a/src/lib/python/isc/datasrc/client_python.h b/src/lib/python/isc/datasrc/client_python.h
new file mode 100644
index 0000000..b20fb6b
--- /dev/null
+++ b/src/lib/python/isc/datasrc/client_python.h
@@ -0,0 +1,35 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __PYTHON_DATASRC_CLIENT_H
+#define __PYTHON_DATASRC_CLIENT_H 1
+
+#include <Python.h>
+
+namespace isc {
+namespace datasrc {
+class DataSourceClient;
+
+namespace python {
+
+extern PyTypeObject datasourceclient_type;
+
+} // namespace python
+} // namespace datasrc
+} // namespace isc
+#endif // __PYTHON_DATASRC_CLIENT_H
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/python/isc/datasrc/datasrc.cc b/src/lib/python/isc/datasrc/datasrc.cc
new file mode 100644
index 0000000..4b0324a
--- /dev/null
+++ b/src/lib/python/isc/datasrc/datasrc.cc
@@ -0,0 +1,225 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#define PY_SSIZE_T_CLEAN
+#include <Python.h>
+#include <structmember.h>
+
+#include <config.h>
+
+#include <datasrc/client.h>
+#include <datasrc/database.h>
+#include <datasrc/sqlite3_accessor.h>
+
+#include "datasrc.h"
+#include "client_python.h"
+#include "finder_python.h"
+#include "iterator_python.h"
+#include "updater_python.h"
+
+#include <util/python/pycppwrapper_util.h>
+#include <dns/python/pydnspp_common.h>
+
+using namespace isc::datasrc;
+using namespace isc::datasrc::python;
+using namespace isc::util::python;
+using namespace isc::dns::python;
+
+namespace isc {
+namespace datasrc {
+namespace python {
+PyObject*
+getDataSourceException(const char* ex_name) {
+ PyObject* ex_obj = NULL;
+
+ PyObject* datasrc_module = PyImport_AddModule("isc.datasrc");
+ if (datasrc_module != NULL) {
+ PyObject* datasrc_dict = PyModule_GetDict(datasrc_module);
+ if (datasrc_dict != NULL) {
+ ex_obj = PyDict_GetItemString(datasrc_dict, ex_name);
+ }
+ }
+
+ if (ex_obj == NULL) {
+ ex_obj = PyExc_RuntimeError;
+ }
+ return (ex_obj);
+}
+
+} // end namespace python
+} // end namespace datasrc
+} // end namespace isc
+
+namespace {
+
+bool
+initModulePart_DataSourceClient(PyObject* mod) {
+ // We initialize the static description object with PyType_Ready(),
+ // then add it to the module. This is not just a check! (leaving
+ // this out results in segmentation faults)
+ if (PyType_Ready(&datasourceclient_type) < 0) {
+ return (false);
+ }
+ void* dscp = &datasourceclient_type;
+ if (PyModule_AddObject(mod, "DataSourceClient", static_cast<PyObject*>(dscp)) < 0) {
+ return (false);
+ }
+ Py_INCREF(&datasourceclient_type);
+
+ addClassVariable(datasourceclient_type, "SUCCESS",
+ Py_BuildValue("I", result::SUCCESS));
+ addClassVariable(datasourceclient_type, "EXIST",
+ Py_BuildValue("I", result::EXIST));
+ addClassVariable(datasourceclient_type, "NOTFOUND",
+ Py_BuildValue("I", result::NOTFOUND));
+ addClassVariable(datasourceclient_type, "PARTIALMATCH",
+ Py_BuildValue("I", result::PARTIALMATCH));
+
+ return (true);
+}
+
+bool
+initModulePart_ZoneFinder(PyObject* mod) {
+ // We initialize the static description object with PyType_Ready(),
+ // then add it to the module. This is not just a check! (leaving
+ // this out results in segmentation faults)
+ if (PyType_Ready(&zonefinder_type) < 0) {
+ return (false);
+ }
+ void* zip = &zonefinder_type;
+ if (PyModule_AddObject(mod, "ZoneFinder", static_cast<PyObject*>(zip)) < 0) {
+ return (false);
+ }
+ Py_INCREF(&zonefinder_type);
+
+ addClassVariable(zonefinder_type, "SUCCESS",
+ Py_BuildValue("I", ZoneFinder::SUCCESS));
+ addClassVariable(zonefinder_type, "DELEGATION",
+ Py_BuildValue("I", ZoneFinder::DELEGATION));
+ addClassVariable(zonefinder_type, "NXDOMAIN",
+ Py_BuildValue("I", ZoneFinder::NXDOMAIN));
+ addClassVariable(zonefinder_type, "NXRRSET",
+ Py_BuildValue("I", ZoneFinder::NXRRSET));
+ addClassVariable(zonefinder_type, "CNAME",
+ Py_BuildValue("I", ZoneFinder::CNAME));
+ addClassVariable(zonefinder_type, "DNAME",
+ Py_BuildValue("I", ZoneFinder::DNAME));
+
+ addClassVariable(zonefinder_type, "FIND_DEFAULT",
+ Py_BuildValue("I", ZoneFinder::FIND_DEFAULT));
+ addClassVariable(zonefinder_type, "FIND_GLUE_OK",
+ Py_BuildValue("I", ZoneFinder::FIND_GLUE_OK));
+ addClassVariable(zonefinder_type, "FIND_DNSSEC",
+ Py_BuildValue("I", ZoneFinder::FIND_DNSSEC));
+
+
+ return (true);
+}
+
+bool
+initModulePart_ZoneIterator(PyObject* mod) {
+ // We initialize the static description object with PyType_Ready(),
+ // then add it to the module. This is not just a check! (leaving
+ // this out results in segmentation faults)
+ if (PyType_Ready(&zoneiterator_type) < 0) {
+ return (false);
+ }
+ void* zip = &zoneiterator_type;
+ if (PyModule_AddObject(mod, "ZoneIterator", static_cast<PyObject*>(zip)) < 0) {
+ return (false);
+ }
+ Py_INCREF(&zoneiterator_type);
+
+ return (true);
+}
+
+bool
+initModulePart_ZoneUpdater(PyObject* mod) {
+ // We initialize the static description object with PyType_Ready(),
+ // then add it to the module. This is not just a check! (leaving
+ // this out results in segmentation faults)
+ if (PyType_Ready(&zoneupdater_type) < 0) {
+ return (false);
+ }
+ void* zip = &zoneupdater_type;
+ if (PyModule_AddObject(mod, "ZoneUpdater", static_cast<PyObject*>(zip)) < 0) {
+ return (false);
+ }
+ Py_INCREF(&zoneupdater_type);
+
+ return (true);
+}
+
+
+PyObject* po_DataSourceError;
+PyObject* po_NotImplemented;
+
+PyModuleDef iscDataSrc = {
+ { PyObject_HEAD_INIT(NULL) NULL, 0, NULL},
+ "datasrc",
+ "Python bindings for the classes in the isc::datasrc namespace.\n\n"
+ "These bindings are close match to the C++ API, but they are not complete "
+ "(some parts are not needed) and some are done in more python-like ways.",
+ -1,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL
+};
+
+} // end anonymous namespace
+
+PyMODINIT_FUNC
+PyInit_datasrc(void) {
+ PyObject* mod = PyModule_Create(&iscDataSrc);
+ if (mod == NULL) {
+ return (NULL);
+ }
+
+ if (!initModulePart_DataSourceClient(mod)) {
+ Py_DECREF(mod);
+ return (NULL);
+ }
+
+ if (!initModulePart_ZoneFinder(mod)) {
+ Py_DECREF(mod);
+ return (NULL);
+ }
+
+ if (!initModulePart_ZoneIterator(mod)) {
+ Py_DECREF(mod);
+ return (NULL);
+ }
+
+ if (!initModulePart_ZoneUpdater(mod)) {
+ Py_DECREF(mod);
+ return (NULL);
+ }
+
+ try {
+ po_DataSourceError = PyErr_NewException("isc.datasrc.Error", NULL,
+ NULL);
+ PyObjectContainer(po_DataSourceError).installToModule(mod, "Error");
+ po_NotImplemented = PyErr_NewException("isc.datasrc.NotImplemented",
+ NULL, NULL);
+ PyObjectContainer(po_NotImplemented).installToModule(mod,
+ "NotImplemented");
+ } catch (...) {
+ Py_DECREF(mod);
+ return (NULL);
+ }
+
+ return (mod);
+}
diff --git a/src/lib/python/isc/datasrc/datasrc.h b/src/lib/python/isc/datasrc/datasrc.h
new file mode 100644
index 0000000..d82881b
--- /dev/null
+++ b/src/lib/python/isc/datasrc/datasrc.h
@@ -0,0 +1,50 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __PYTHON_DATASRC_H
+#define __PYTHON_DATASRC_H 1
+
+#include <Python.h>
+
+namespace isc {
+namespace datasrc {
+namespace python {
+
+// Return a Python exception object of the given name (ex_name) defined in
+// the isc.datasrc.datasrc loadable module.
+//
+// Since the datasrc module is a different binary image and is loaded separately
+// from the dns module, it would be very tricky to directly access
+// C/C++ symbols defined in that module. So we get access to these objects
+// using the Python interpreter through this wrapper function.
+//
+// The __init__.py file should ensure isc.datasrc has been loaded by the time
+// this function is called, and there shouldn't be any operation
+// within this function that can fail (such as dynamic memory allocation),
+// so this function should always succeed. Yet there may be an overlooked
+// failure mode, perhaps due to a bug in the binding implementation, or
+// due to invalid usage. As a last resort for such cases, this function
+// returns PyExc_RuntimeError (a C binding of Python's RuntimeError) should
+// it encounter an unexpected failure.
+extern PyObject* getDataSourceException(const char* ex_name);
+
+} // namespace python
+} // namespace datasrc
+} // namespace isc
+
+#endif // __PYTHON_DATASRC_H
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/python/isc/datasrc/finder_inc.cc b/src/lib/python/isc/datasrc/finder_inc.cc
new file mode 100644
index 0000000..2b47d02
--- /dev/null
+++ b/src/lib/python/isc/datasrc/finder_inc.cc
@@ -0,0 +1,96 @@
+namespace {
+const char* const ZoneFinder_doc = "\
+The base class to search a zone for RRsets.\n\
+\n\
+The ZoneFinder class is a wrapper for the c++ base class for representing an\n\
+object that performs DNS lookups in a specific zone accessible via a\n\
+data source. In general, different types of data sources (in-memory,\n\
+database-based, etc) define their own derived c++ classes of ZoneFinder,\n\
+implementing ways to retrieve the required data through the common\n\
+interfaces declared in the base class. Each concrete ZoneFinder object\n\
+is therefore (conceptually) associated with a specific zone of one\n\
+specific data source instance.\n\
+\n\
+The origin name and the RR class of the associated zone are available\n\
+via the get_origin() and get_class() methods, respectively.\n\
+\n\
+The most important method of this class is find(), which performs the\n\
+lookup for a given domain and type. See the description of the method\n\
+for details.\n\
+\n\
+It's not clear whether we should request that a zone finder form a\n\
+\"transaction\", that is, whether to ensure the finder is not\n\
+susceptible to changes made by someone else than the creator of the\n\
+finder. If we don't request that, for example, two different lookup\n\
+results for the same name and type can be different if other threads\n\
+or programs make updates to the zone between the lookups. We should\n\
+revisit this point as we gain more experiences.\n\
+\n\
+";
+
+const char* const ZoneFinder_getOrigin_doc = "\
+get_origin() -> isc.dns.Name\n\
+\n\
+Return the origin name of the zone.\n\
+\n\
+";
+
+const char* const ZoneFinder_getClass_doc = "\
+get_class() -> isc.dns.RRClass\n\
+\n\
+Return the RR class of the zone.\n\
+\n\
+";
+
+const char* const ZoneFinder_find_doc = "\
+find(name, type, target=NULL, options=FIND_DEFAULT) -> (code, FindResult)\n\
+\n\
+Search the zone for a given pair of domain name and RR type.\n\
+\n\
+- If the search name belongs under a zone cut, it returns the code of\n\
+ DELEGATION and the NS RRset at the zone cut.\n\
+- If there is no matching name, it returns the code of NXDOMAIN, and,\n\
+ if DNSSEC is requested, the NSEC RRset that proves the non-\n\
+ existence.\n\
+- If there is a matching name but no RRset of the search type, it\n\
+ returns the code of NXRRSET, and, if DNSSEC is required, the NSEC\n\
+ RRset for that name.\n\
+- If there is a CNAME RR of the searched name but there is no RR of\n\
+ the searched type of the name (so this type is different from\n\
+ CNAME), it returns the code of CNAME and that CNAME RR. Note that if\n\
+ the searched RR type is CNAME, it is considered a successful match,\n\
+ and the code of SUCCESS will be returned.\n\
+- If the search name matches a delegation point of DNAME, it returns\n\
+ the code of DNAME and that DNAME RR.\n\
+- If the target is a list, all RRsets under the domain are inserted\n\
+ there and SUCCESS (or NXDOMAIN, in case of empty domain) is returned\n\
+ instead of normal processing. This is intended to handle ANY query.\n\
+ : this behavior is controversial as we discussed in\n\
+ https://lists.isc.org/pipermail/bind10-dev/2011-January/001918.html\n\
+ We should revisit the interface before we heavily rely on it. The\n\
+ options parameter specifies customized behavior of the search. Their\n\
+ semantics is as follows:\n\
+ (This feature is disabled at this time)\n\
+- GLUE_OK Allow search under a zone cut. By default the search will\n\
+ stop once it encounters a zone cut. If this option is specified it\n\
+ remembers information about the highest zone cut and continues the\n\
+ search until it finds an exact match for the given name or it\n\
+ detects there is no exact match. If an exact match is found, RRsets\n\
+ for that name are searched just like the normal case; otherwise, if\n\
+ the search has encountered a zone cut, DELEGATION with the\n\
+ information of the highest zone cut will be returned.\n\
+\n\
+This method raises an isc.datasrc.Error exception if there is an internal\n\
+error in the datasource.\n\
+\n\
+Parameters:\n\
+ name The domain name to be searched for.\n\
+ type The RR type to be searched for.\n\
+ target If target is not NULL, insert all RRs under the domain\n\
+ into it.\n\
+ options The search options.\n\
+\n\
+Return Value(s): A tuple of a result code and a FindResult object enclosing\n\
+the search result (see above).\n\
+";
+} // unnamed namespace
diff --git a/src/lib/python/isc/datasrc/finder_python.cc b/src/lib/python/isc/datasrc/finder_python.cc
new file mode 100644
index 0000000..598d300
--- /dev/null
+++ b/src/lib/python/isc/datasrc/finder_python.cc
@@ -0,0 +1,248 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+// Enable this if you use s# variants with PyArg_ParseTuple(), see
+// http://docs.python.org/py3k/c-api/arg.html#strings-and-buffers
+//#define PY_SSIZE_T_CLEAN
+
+// Python.h needs to be placed at the head of the program file, see:
+// http://docs.python.org/py3k/extending/extending.html#a-simple-example
+#include <Python.h>
+
+#include <util/python/pycppwrapper_util.h>
+
+#include <datasrc/client.h>
+#include <datasrc/database.h>
+#include <datasrc/data_source.h>
+#include <datasrc/sqlite3_accessor.h>
+#include <datasrc/iterator.h>
+#include <datasrc/zone.h>
+
+#include <dns/python/name_python.h>
+#include <dns/python/rrset_python.h>
+#include <dns/python/rrclass_python.h>
+#include <dns/python/rrtype_python.h>
+#include <dns/python/pydnspp_common.h>
+
+#include "datasrc.h"
+#include "finder_python.h"
+#include "finder_inc.cc"
+
+using namespace std;
+using namespace isc::util::python;
+using namespace isc::dns::python;
+using namespace isc::datasrc;
+using namespace isc::datasrc::python;
+
+namespace isc_datasrc_internal {
+// This is the shared code for the find() call in the finder and the updater
+// It is intentionally not available through any header, nor at our standard
+// namespace, as it is not supposed to be called anywhere but from finder and
+// updater
+PyObject* ZoneFinder_helper(ZoneFinder* finder, PyObject* args) {
+ if (finder == NULL) {
+ PyErr_SetString(getDataSourceException("Error"),
+ "Internal error in find() wrapper; finder object NULL");
+ return (NULL);
+ }
+ PyObject *name;
+ PyObject *rrtype;
+ PyObject *target;
+ int options_int;
+ if (PyArg_ParseTuple(args, "O!O!OI", &name_type, &name,
+ &rrtype_type, &rrtype,
+ &target, &options_int)) {
+ try {
+ ZoneFinder::FindOptions options =
+ static_cast<ZoneFinder::FindOptions>(options_int);
+ ZoneFinder::FindResult find_result(
+ finder->find(PyName_ToName(name),
+ PyRRType_ToRRType(rrtype),
+ NULL,
+ options
+ ));
+ ZoneFinder::Result r = find_result.code;
+ isc::dns::ConstRRsetPtr rrsp = find_result.rrset;
+ if (rrsp) {
+ // Use N instead of O so the refcount isn't increased twice
+ return (Py_BuildValue("IN", r, createRRsetObject(*rrsp)));
+ } else {
+ return (Py_BuildValue("IO", r, Py_None));
+ }
+ } catch (const DataSourceError& dse) {
+ PyErr_SetString(getDataSourceException("Error"), dse.what());
+ return (NULL);
+ } catch (const std::exception& exc) {
+ PyErr_SetString(getDataSourceException("Error"), exc.what());
+ return (NULL);
+ } catch (...) {
+ PyErr_SetString(getDataSourceException("Error"),
+ "Unexpected exception");
+ return (NULL);
+ }
+ } else {
+ return (NULL);
+ }
+ return Py_BuildValue("I", 1);
+}
+
+} // end namespace internal
+
+namespace {
+// The s_* Class simply covers one instantiation of the object
+class s_ZoneFinder : public PyObject {
+public:
+ s_ZoneFinder() : cppobj(ZoneFinderPtr()) {};
+ ZoneFinderPtr cppobj;
+};
+
+// Shortcut type which would be convenient for adding class variables safely.
+typedef CPPPyObjectContainer<s_ZoneFinder, ZoneFinder> ZoneFinderContainer;
+
+// General creation and destruction
+int
+ZoneFinder_init(s_ZoneFinder* self, PyObject* args) {
+ // can't be called directly
+ PyErr_SetString(PyExc_TypeError,
+ "ZoneFinder cannot be constructed directly");
+
+ return (-1);
+}
+
+void
+ZoneFinder_destroy(s_ZoneFinder* const self) {
+ // cppobj is a shared ptr, but to make sure things are not destroyed in
+ // the wrong order, we reset it here.
+ self->cppobj.reset();
+ Py_TYPE(self)->tp_free(self);
+}
+
+PyObject*
+ZoneFinder_getClass(PyObject* po_self, PyObject*) {
+ s_ZoneFinder* self = static_cast<s_ZoneFinder*>(po_self);
+ try {
+ return (createRRClassObject(self->cppobj->getClass()));
+ } catch (const std::exception& exc) {
+ PyErr_SetString(getDataSourceException("Error"), exc.what());
+ return (NULL);
+ }
+}
+
+PyObject*
+ZoneFinder_getOrigin(PyObject* po_self, PyObject*) {
+ s_ZoneFinder* self = static_cast<s_ZoneFinder*>(po_self);
+ try {
+ return (createNameObject(self->cppobj->getOrigin()));
+ } catch (const std::exception& exc) {
+ PyErr_SetString(getDataSourceException("Error"), exc.what());
+ return (NULL);
+ } catch (...) {
+ PyErr_SetString(getDataSourceException("Error"),
+ "Unexpected exception");
+ return (NULL);
+ }
+}
+
+PyObject*
+ZoneFinder_find(PyObject* po_self, PyObject* args) {
+ s_ZoneFinder* const self = static_cast<s_ZoneFinder*>(po_self);
+ return (isc_datasrc_internal::ZoneFinder_helper(self->cppobj.get(), args));
+}
+
+// This list contains the actual set of functions we have in
+// python. Each entry has
+// 1. Python method name
+// 2. Our static function here
+// 3. Argument type
+// 4. Documentation
+PyMethodDef ZoneFinder_methods[] = {
+ { "get_origin", reinterpret_cast<PyCFunction>(ZoneFinder_getOrigin),
+ METH_NOARGS, ZoneFinder_getOrigin_doc },
+ { "get_class", reinterpret_cast<PyCFunction>(ZoneFinder_getClass),
+ METH_NOARGS, ZoneFinder_getClass_doc },
+ { "find", reinterpret_cast<PyCFunction>(ZoneFinder_find), METH_VARARGS,
+ ZoneFinder_find_doc },
+ { NULL, NULL, 0, NULL }
+};
+
+} // end of unnamed namespace
+
+namespace isc {
+namespace datasrc {
+namespace python {
+
+PyTypeObject zonefinder_type = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ "datasrc.ZoneFinder",
+ sizeof(s_ZoneFinder), // tp_basicsize
+ 0, // tp_itemsize
+ reinterpret_cast<destructor>(ZoneFinder_destroy),// tp_dealloc
+ NULL, // tp_print
+ NULL, // tp_getattr
+ NULL, // tp_setattr
+ NULL, // tp_reserved
+ NULL, // tp_repr
+ NULL, // tp_as_number
+ NULL, // tp_as_sequence
+ NULL, // tp_as_mapping
+ NULL, // tp_hash
+ NULL, // tp_call
+ NULL, // tp_str
+ NULL, // tp_getattro
+ NULL, // tp_setattro
+ NULL, // tp_as_buffer
+ Py_TPFLAGS_DEFAULT, // tp_flags
+ ZoneFinder_doc,
+ NULL, // tp_traverse
+ NULL, // tp_clear
+ NULL, // tp_richcompare
+ 0, // tp_weaklistoffset
+ NULL, // tp_iter
+ NULL, // tp_iternext
+ ZoneFinder_methods, // tp_methods
+ NULL, // tp_members
+ NULL, // tp_getset
+ NULL, // tp_base
+ NULL, // tp_dict
+ NULL, // tp_descr_get
+ NULL, // tp_descr_set
+ 0, // tp_dictoffset
+ reinterpret_cast<initproc>(ZoneFinder_init),// tp_init
+ NULL, // tp_alloc
+ PyType_GenericNew, // tp_new
+ NULL, // tp_free
+ NULL, // tp_is_gc
+ NULL, // tp_bases
+ NULL, // tp_mro
+ NULL, // tp_cache
+ NULL, // tp_subclasses
+ NULL, // tp_weaklist
+ NULL, // tp_del
+ 0 // tp_version_tag
+};
+
+PyObject*
+createZoneFinderObject(isc::datasrc::ZoneFinderPtr source) {
+ s_ZoneFinder* py_zi = static_cast<s_ZoneFinder*>(
+ zonefinder_type.tp_alloc(&zonefinder_type, 0));
+ if (py_zi != NULL) {
+ py_zi->cppobj = source;
+ }
+ return (py_zi);
+}
+
+} // namespace python
+} // namespace datasrc
+} // namespace isc
+
diff --git a/src/lib/python/isc/datasrc/finder_python.h b/src/lib/python/isc/datasrc/finder_python.h
new file mode 100644
index 0000000..5f2404e
--- /dev/null
+++ b/src/lib/python/isc/datasrc/finder_python.h
@@ -0,0 +1,36 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __PYTHON_DATASRC_FINDER_H
+#define __PYTHON_DATASRC_FINDER_H 1
+
+#include <Python.h>
+
+namespace isc {
+namespace datasrc {
+
+namespace python {
+
+extern PyTypeObject zonefinder_type;
+
+PyObject* createZoneFinderObject(isc::datasrc::ZoneFinderPtr source);
+
+} // namespace python
+} // namespace datasrc
+} // namespace isc
+#endif // __PYTHON_DATASRC_FINDER_H
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/python/isc/datasrc/iterator_inc.cc b/src/lib/python/isc/datasrc/iterator_inc.cc
new file mode 100644
index 0000000..b1d9d25
--- /dev/null
+++ b/src/lib/python/isc/datasrc/iterator_inc.cc
@@ -0,0 +1,34 @@
+namespace {
+
+const char* const ZoneIterator_doc = "\
+Read-only iterator to a zone.\n\
+\n\
+You can get an instance of the ZoneIterator from\n\
+DataSourceClient.get_iterator() method. The actual concrete\n\
+c++ implementation will be different depending on the actual data source\n\
+used. This is the abstract interface.\n\
+\n\
+There's no way to start iterating from the beginning again or return.\n\
+\n\
+The ZoneIterator is a python iterator, and can be iterated over directly.\n\
+";
+
+const char* const ZoneIterator_getNextRRset_doc = "\
+get_next_rrset() -> isc.dns.RRset\n\
+\n\
+Get next RRset from the zone.\n\
+\n\
+This returns the next RRset in the zone.\n\
+\n\
+No particular order is guaranteed.\n\
+\n\
+While this can potentially throw anything (including standard\n\
+allocation errors), it should be rare.\n\
+\n\
+The next RRset, or None when the iteration gets to\n\
+the end of the zone.\n\
+\n\
+Raises an isc.datasrc.Error exception if it is called again after returning\n\
+None\n\
+";
+} // unnamed namespace
diff --git a/src/lib/python/isc/datasrc/iterator_python.cc b/src/lib/python/isc/datasrc/iterator_python.cc
new file mode 100644
index 0000000..b482ea6
--- /dev/null
+++ b/src/lib/python/isc/datasrc/iterator_python.cc
@@ -0,0 +1,202 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+// Enable this if you use s# variants with PyArg_ParseTuple(), see
+// http://docs.python.org/py3k/c-api/arg.html#strings-and-buffers
+//#define PY_SSIZE_T_CLEAN
+
+// Python.h needs to be placed at the head of the program file, see:
+// http://docs.python.org/py3k/extending/extending.html#a-simple-example
+#include <Python.h>
+
+#include <util/python/pycppwrapper_util.h>
+
+#include <datasrc/client.h>
+#include <datasrc/database.h>
+#include <datasrc/sqlite3_accessor.h>
+#include <datasrc/iterator.h>
+
+#include <dns/python/name_python.h>
+#include <dns/python/rrset_python.h>
+
+#include "datasrc.h"
+#include "iterator_python.h"
+
+#include "iterator_inc.cc"
+
+using namespace std;
+using namespace isc::util::python;
+using namespace isc::dns::python;
+using namespace isc::datasrc;
+using namespace isc::datasrc::python;
+
+namespace {
+// The s_* Class simply covers one instantiation of the object
+class s_ZoneIterator : public PyObject {
+public:
+ s_ZoneIterator() : cppobj(ZoneIteratorPtr()) {};
+ ZoneIteratorPtr cppobj;
+};
+
+// Shortcut type which would be convenient for adding class variables safely.
+typedef CPPPyObjectContainer<s_ZoneIterator, ZoneIterator>
+ ZoneIteratorContainer;
+
+// General creation and destruction
+int
+ZoneIterator_init(s_ZoneIterator* self, PyObject* args) {
+ // can't be called directly
+ PyErr_SetString(PyExc_TypeError,
+ "ZoneIterator cannot be constructed directly");
+
+ return (-1);
+}
+
+void
+ZoneIterator_destroy(s_ZoneIterator* const self) {
+ // cppobj is a shared ptr, but to make sure things are not destroyed in
+ // the wrong order, we reset it here.
+ self->cppobj.reset();
+ Py_TYPE(self)->tp_free(self);
+}
+
+//
+// We declare the functions here, the definitions are below
+// the type definition of the object, since both can use the other
+//
+PyObject*
+ZoneIterator_getNextRRset(PyObject* po_self, PyObject*) {
+ s_ZoneIterator* self = static_cast<s_ZoneIterator*>(po_self);
+ if (!self->cppobj) {
+ PyErr_SetString(getDataSourceException("Error"),
+ "get_next_rrset() called past end of iterator");
+ return (NULL);
+ }
+ try {
+ isc::dns::ConstRRsetPtr rrset = self->cppobj->getNextRRset();
+ if (!rrset) {
+ Py_RETURN_NONE;
+ }
+ return (createRRsetObject(*rrset));
+ } catch (const isc::Exception& isce) {
+ // isc::Unexpected is thrown when we call getNextRRset() when we are
+ // already done iterating ('iterating past end')
+ // We could also simply return None again
+ PyErr_SetString(getDataSourceException("Error"), isce.what());
+ return (NULL);
+ } catch (const std::exception& exc) {
+ PyErr_SetString(getDataSourceException("Error"), exc.what());
+ return (NULL);
+ } catch (...) {
+ PyErr_SetString(getDataSourceException("Error"),
+ "Unexpected exception");
+ return (NULL);
+ }
+}
+
+PyObject*
+ZoneIterator_iter(PyObject *self) {
+ Py_INCREF(self);
+ return (self);
+}
+
+PyObject*
+ZoneIterator_next(PyObject* self) {
+ PyObject *result = ZoneIterator_getNextRRset(self, NULL);
+ // iter_next must return NULL without error instead of Py_None
+ if (result == Py_None) {
+ Py_DECREF(result);
+ return (NULL);
+ } else {
+ return (result);
+ }
+}
+
+PyMethodDef ZoneIterator_methods[] = {
+ { "get_next_rrset",
+ reinterpret_cast<PyCFunction>(ZoneIterator_getNextRRset), METH_NOARGS,
+ ZoneIterator_getNextRRset_doc },
+ { NULL, NULL, 0, NULL }
+};
+
+
+} // end of unnamed namespace
+
+namespace isc {
+namespace datasrc {
+namespace python {
+PyTypeObject zoneiterator_type = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ "datasrc.ZoneIterator",
+ sizeof(s_ZoneIterator), // tp_basicsize
+ 0, // tp_itemsize
+ reinterpret_cast<destructor>(ZoneIterator_destroy),// tp_dealloc
+ NULL, // tp_print
+ NULL, // tp_getattr
+ NULL, // tp_setattr
+ NULL, // tp_reserved
+ NULL, // tp_repr
+ NULL, // tp_as_number
+ NULL, // tp_as_sequence
+ NULL, // tp_as_mapping
+ NULL, // tp_hash
+ NULL, // tp_call
+ NULL, // tp_str
+ NULL, // tp_getattro
+ NULL, // tp_setattro
+ NULL, // tp_as_buffer
+ Py_TPFLAGS_DEFAULT, // tp_flags
+ ZoneIterator_doc,
+ NULL, // tp_traverse
+ NULL, // tp_clear
+ NULL, // tp_richcompare
+ 0, // tp_weaklistoffset
+ ZoneIterator_iter, // tp_iter
+ ZoneIterator_next, // tp_iternext
+ ZoneIterator_methods, // tp_methods
+ NULL, // tp_members
+ NULL, // tp_getset
+ NULL, // tp_base
+ NULL, // tp_dict
+ NULL, // tp_descr_get
+ NULL, // tp_descr_set
+ 0, // tp_dictoffset
+ reinterpret_cast<initproc>(ZoneIterator_init),// tp_init
+ NULL, // tp_alloc
+ PyType_GenericNew, // tp_new
+ NULL, // tp_free
+ NULL, // tp_is_gc
+ NULL, // tp_bases
+ NULL, // tp_mro
+ NULL, // tp_cache
+ NULL, // tp_subclasses
+ NULL, // tp_weaklist
+ NULL, // tp_del
+ 0 // tp_version_tag
+};
+
+PyObject*
+createZoneIteratorObject(isc::datasrc::ZoneIteratorPtr source) {
+ s_ZoneIterator* py_zi = static_cast<s_ZoneIterator*>(
+ zoneiterator_type.tp_alloc(&zoneiterator_type, 0));
+ if (py_zi != NULL) {
+ py_zi->cppobj = source;
+ }
+ return (py_zi);
+}
+
+} // namespace python
+} // namespace datasrc
+} // namespace isc
+
diff --git a/src/lib/python/isc/datasrc/iterator_python.h b/src/lib/python/isc/datasrc/iterator_python.h
new file mode 100644
index 0000000..b457740
--- /dev/null
+++ b/src/lib/python/isc/datasrc/iterator_python.h
@@ -0,0 +1,38 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __PYTHON_DATASRC_ITERATOR_H
+#define __PYTHON_DATASRC_ITERATOR_H 1
+
+#include <Python.h>
+
+namespace isc {
+namespace datasrc {
+class DataSourceClient;
+
+namespace python {
+
+extern PyTypeObject zoneiterator_type;
+
+PyObject* createZoneIteratorObject(isc::datasrc::ZoneIteratorPtr source);
+
+
+} // namespace python
+} // namespace datasrc
+} // namespace isc
+#endif // __PYTHON_DATASRC_ITERATOR_H
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/python/isc/datasrc/sqlite3_ds.py b/src/lib/python/isc/datasrc/sqlite3_ds.py
index a77645a..fd63741 100644
--- a/src/lib/python/isc/datasrc/sqlite3_ds.py
+++ b/src/lib/python/isc/datasrc/sqlite3_ds.py
@@ -33,44 +33,63 @@ def create(cur):
Arguments:
cur - sqlite3 cursor.
"""
- cur.execute("CREATE TABLE schema_version (version INTEGER NOT NULL)")
- cur.execute("INSERT INTO schema_version VALUES (1)")
- cur.execute("""CREATE TABLE zones (id INTEGER PRIMARY KEY,
- name STRING NOT NULL COLLATE NOCASE,
- rdclass STRING NOT NULL COLLATE NOCASE DEFAULT 'IN',
- dnssec BOOLEAN NOT NULL DEFAULT 0)""")
- cur.execute("CREATE INDEX zones_byname ON zones (name)")
- cur.execute("""CREATE TABLE records (id INTEGER PRIMARY KEY,
- zone_id INTEGER NOT NULL,
- name STRING NOT NULL COLLATE NOCASE,
- rname STRING NOT NULL COLLATE NOCASE,
- ttl INTEGER NOT NULL,
- rdtype STRING NOT NULL COLLATE NOCASE,
- sigtype STRING COLLATE NOCASE,
- rdata STRING NOT NULL)""")
- cur.execute("CREATE INDEX records_byname ON records (name)")
- cur.execute("CREATE INDEX records_byrname ON records (rname)")
- cur.execute("""CREATE TABLE nsec3 (id INTEGER PRIMARY KEY,
- zone_id INTEGER NOT NULL,
- hash STRING NOT NULL COLLATE NOCASE,
- owner STRING NOT NULL COLLATE NOCASE,
- ttl INTEGER NOT NULL,
- rdtype STRING NOT NULL COLLATE NOCASE,
- rdata STRING NOT NULL)""")
- cur.execute("CREATE INDEX nsec3_byhash ON nsec3 (hash)")
-
-def open(dbfile):
+ # We are creating the database because it apparently had not been created
+ # at the time we tried to read from it. However, another process may have
+ # had the same idea, resulting in a potential race condition.
+ # Therefore, we obtain an exclusive lock before we create anything
+ # When we have it, we check *again* whether the database has been
+ # initialized. If not, we do so.
+
+ # If the database is perpetually locked, it'll time out automatically
+ # and we just let it fail.
+ cur.execute("BEGIN EXCLUSIVE TRANSACTION")
+ try:
+ cur.execute("SELECT version FROM schema_version")
+ row = cur.fetchone()
+ except sqlite3.OperationalError:
+ cur.execute("CREATE TABLE schema_version (version INTEGER NOT NULL)")
+ cur.execute("INSERT INTO schema_version VALUES (1)")
+ cur.execute("""CREATE TABLE zones (id INTEGER PRIMARY KEY,
+ name STRING NOT NULL COLLATE NOCASE,
+ rdclass STRING NOT NULL COLLATE NOCASE DEFAULT 'IN',
+ dnssec BOOLEAN NOT NULL DEFAULT 0)""")
+ cur.execute("CREATE INDEX zones_byname ON zones (name)")
+ cur.execute("""CREATE TABLE records (id INTEGER PRIMARY KEY,
+ zone_id INTEGER NOT NULL,
+ name STRING NOT NULL COLLATE NOCASE,
+ rname STRING NOT NULL COLLATE NOCASE,
+ ttl INTEGER NOT NULL,
+ rdtype STRING NOT NULL COLLATE NOCASE,
+ sigtype STRING COLLATE NOCASE,
+ rdata STRING NOT NULL)""")
+ cur.execute("CREATE INDEX records_byname ON records (name)")
+ cur.execute("CREATE INDEX records_byrname ON records (rname)")
+ cur.execute("""CREATE TABLE nsec3 (id INTEGER PRIMARY KEY,
+ zone_id INTEGER NOT NULL,
+ hash STRING NOT NULL COLLATE NOCASE,
+ owner STRING NOT NULL COLLATE NOCASE,
+ ttl INTEGER NOT NULL,
+ rdtype STRING NOT NULL COLLATE NOCASE,
+ rdata STRING NOT NULL)""")
+ cur.execute("CREATE INDEX nsec3_byhash ON nsec3 (hash)")
+ row = [1]
+ cur.execute("COMMIT TRANSACTION")
+ return row
+
+def open(dbfile, connect_timeout=5.0):
""" Open a database, if the database is not yet set up, call create
to do so. It may raise Sqlite3DSError if failed to open sqlite3
database file or find bad database schema version in the database.
Arguments:
dbfile - the filename for the sqlite3 database.
+ connect_timeout - timeout for opening the database or acquiring locks
+ defaults to sqlite3 module's default of 5.0 seconds
Return sqlite3 connection, sqlite3 cursor.
"""
try:
- conn = sqlite3.connect(dbfile)
+ conn = sqlite3.connect(dbfile, timeout=connect_timeout)
cur = conn.cursor()
except Exception as e:
fail = "Failed to open " + dbfile + ": " + e.args[0]
@@ -80,10 +99,13 @@ def open(dbfile):
try:
cur.execute("SELECT version FROM schema_version")
row = cur.fetchone()
- except:
- create(cur)
- conn.commit()
- row = [1]
+ except sqlite3.OperationalError:
+ # temporarily disable automatic transactions so
+ # we can do our own
+ iso_lvl = conn.isolation_level
+ conn.isolation_level = None
+ row = create(cur)
+ conn.isolation_level = iso_lvl
if row == None or row[0] != 1:
raise Sqlite3DSError("Bad database schema version")
diff --git a/src/lib/python/isc/datasrc/tests/Makefile.am b/src/lib/python/isc/datasrc/tests/Makefile.am
index 6f6d157..be30dfa 100644
--- a/src/lib/python/isc/datasrc/tests/Makefile.am
+++ b/src/lib/python/isc/datasrc/tests/Makefile.am
@@ -1,16 +1,18 @@
PYCOVERAGE_RUN = @PYCOVERAGE_RUN@
-PYTESTS = master_test.py sqlite3_ds_test.py
+# old tests, TODO remove or change to use new API?
+#PYTESTS = master_test.py sqlite3_ds_test.py
+PYTESTS = datasrc_test.py
EXTRA_DIST = $(PYTESTS)
EXTRA_DIST += testdata/brokendb.sqlite3
EXTRA_DIST += testdata/example.com.sqlite3
-CLEANFILES = $(abs_builddir)/example.com.out.sqlite3
+CLEANFILES = $(abs_builddir)/rwtest.sqlite3.copied
# If necessary (rare cases), explicitly specify paths to dynamic libraries
# required by loadable python modules.
LIBRARY_PATH_PLACEHOLDER =
if SET_ENV_LIBRARY_PATH
-LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$$$(ENV_LIBRARY_PATH)
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
endif
# test using command-line arguments, so use check-local target instead of TESTS
@@ -23,7 +25,7 @@ endif
for pytest in $(PYTESTS) ; do \
echo Running test: $$pytest ; \
$(LIBRARY_PATH_PLACEHOLDER) \
- env PYTHONPATH=$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python:$(abs_top_builddir)/src/lib/python/isc/log \
+ PYTHONPATH=:$(COMMON_PYTHON_PATH):$(abs_top_builddir)/src/lib/python/isc/log:$(abs_top_builddir)/src/lib/python/isc/datasrc/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs \
TESTDATA_PATH=$(abs_srcdir)/testdata \
TESTDATA_WRITE_PATH=$(abs_builddir) \
$(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
diff --git a/src/lib/python/isc/datasrc/tests/datasrc_test.py b/src/lib/python/isc/datasrc/tests/datasrc_test.py
new file mode 100644
index 0000000..15ceb80
--- /dev/null
+++ b/src/lib/python/isc/datasrc/tests/datasrc_test.py
@@ -0,0 +1,389 @@
+# Copyright (C) 2011 Internet Systems Consortium.
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import isc.log
+import isc.datasrc
+import isc.dns
+import unittest
+import os
+import shutil
+
+TESTDATA_PATH = os.environ['TESTDATA_PATH'] + os.sep
+TESTDATA_WRITE_PATH = os.environ['TESTDATA_WRITE_PATH'] + os.sep
+
+READ_ZONE_DB_FILE = TESTDATA_PATH + "example.com.sqlite3"
+BROKEN_DB_FILE = TESTDATA_PATH + "brokendb.sqlite3"
+WRITE_ZONE_DB_FILE = TESTDATA_WRITE_PATH + "rwtest.sqlite3.copied"
+NEW_DB_FILE = TESTDATA_WRITE_PATH + "new_db.sqlite3"
+
+def add_rrset(rrset_list, name, rrclass, rrtype, ttl, rdatas):
+ rrset_to_add = isc.dns.RRset(name, rrclass, rrtype, ttl)
+ if rdatas is not None:
+ for rdata in rdatas:
+ rrset_to_add.add_rdata(isc.dns.Rdata(rrtype, rrclass, rdata))
+ rrset_list.append(rrset_to_add)
+
+# helper function, we have no direct rrset comparison atm
+def rrsets_equal(a, b):
+ # no accessor for sigs either (so this only checks name, class, type, ttl,
+ # and rdata)
+ # also, because of the fake data in rrsigs, if the type is rrsig, the
+ # rdata is not checked
+ return a.get_name() == b.get_name() and\
+ a.get_class() == b.get_class() and\
+ a.get_type() == b.get_type() and \
+ a.get_ttl() == b.get_ttl() and\
+ (a.get_type() == isc.dns.RRType.RRSIG() or
+ sorted(a.get_rdata()) == sorted(b.get_rdata()))
+
+# returns true if rrset is in expected_rrsets
+# will remove the rrset from expected_rrsets if found
+def check_for_rrset(expected_rrsets, rrset):
+ for cur_rrset in expected_rrsets[:]:
+ if rrsets_equal(cur_rrset, rrset):
+ expected_rrsets.remove(cur_rrset)
+ return True
+ return False
+
+class DataSrcClient(unittest.TestCase):
+
+ def test_construct(self):
+ # can't construct directly
+ self.assertRaises(TypeError, isc.datasrc.ZoneIterator)
+
+
+ def test_iterate(self):
+ dsc = isc.datasrc.DataSourceClient(READ_ZONE_DB_FILE)
+
+ # for RRSIGS, the TTL's are currently modified. This test should
+ # start failing when we fix that.
+ rrs = dsc.get_iterator(isc.dns.Name("sql1.example.com."))
+
+ # we do not know the order in which they are returned by the iterator
+ # but we do want to check them, so we put all records into one list
+ # sort it (doesn't matter which way it is sorted, as long as it is
+ # sorted)
+
+ # RRset is (atm) an unorderable type, and within an rrset, the
+ # rdatas and rrsigs may also be in random order. In theory the
+ # rrsets themselves can be returned in any order.
+ #
+ # So we create a second list with all rrsets we expect, and for each
+ # rrset we get from the iterator, see if it is in that list, and
+ # remove it.
+ #
+ # When the iterator is empty, we check no rrsets are left in the
+ # list of expected ones
+ expected_rrset_list = []
+
+ name = isc.dns.Name("sql1.example.com")
+ rrclass = isc.dns.RRClass.IN()
+ add_rrset(expected_rrset_list, name, rrclass,
+ isc.dns.RRType.DNSKEY(), isc.dns.RRTTL(3600),
+ [
+ "256 3 5 AwEAAdYdRhBAEY67R/8G1N5AjGF6asIiNh/pNGeQ8xDQP13J"+
+ "N2lo+sNqWcmpYNhuVqRbLB+mamsU1XcCICSBvAlSmfz/ZUdafX23knAr"+
+ "TlALxMmspcfdpqun3Yr3YYnztuj06rV7RqmveYckWvAUXVYMSMQZfJ30"+
+ "5fs0dE/xLztL/CzZ",
+ "257 3 5 AwEAAbaKDSa9XEFTsjSYpUTHRotTS9Tz3krfDucugW5UokGQ"+
+ "KC26QlyHXlPTZkC+aRFUs/dicJX2kopndLcnlNAPWiKnKtrsFSCnIJDB"+
+ "ZIyvcKq+9RXmV3HK3bUdHnQZ88IZWBRmWKfZ6wnzHo53kdYKAemTErkz"+
+ "taX3lRRPLYWpxRcDPEjysXT3Lh0vfL5D+CIO1yKw/q7C+v6+/kYAxc2l"+
+ "fbNE3HpklSuF+dyX4nXxWgzbcFuLz5Bwfq6ZJ9RYe/kNkA0uMWNa1KkG"+
+ "eRh8gg22kgD/KT5hPTnpezUWLvoY5Qc7IB3T0y4n2JIwiF2ZrZYVrWgD"+
+ "jRWAzGsxJiJyjd6w2k0="
+ ])
+ add_rrset(expected_rrset_list, name, rrclass,
+ isc.dns.RRType.NS(), isc.dns.RRTTL(3600),
+ [
+ "dns01.example.com.",
+ "dns02.example.com.",
+ "dns03.example.com."
+ ])
+ add_rrset(expected_rrset_list, name, rrclass,
+ isc.dns.RRType.NSEC(), isc.dns.RRTTL(7200),
+ [
+ "www.sql1.example.com. NS SOA RRSIG NSEC DNSKEY"
+ ])
+ # For RRSIGS, we can't add the fake data through the API, so we
+ # simply pass no rdata at all (which is skipped by the check later)
+ add_rrset(expected_rrset_list, name, rrclass,
+ isc.dns.RRType.RRSIG(), isc.dns.RRTTL(3600), None)
+ add_rrset(expected_rrset_list, name, rrclass,
+ isc.dns.RRType.SOA(), isc.dns.RRTTL(3600),
+ [
+ "master.example.com. admin.example.com. 678 3600 1800 2419200 7200"
+ ])
+ name = isc.dns.Name("www.sql1.example.com.")
+ add_rrset(expected_rrset_list, name, rrclass,
+ isc.dns.RRType.A(), isc.dns.RRTTL(3600),
+ [
+ "192.0.2.100"
+ ])
+ name = isc.dns.Name("www.sql1.example.com.")
+ add_rrset(expected_rrset_list, name, rrclass,
+ isc.dns.RRType.NSEC(), isc.dns.RRTTL(7200),
+ [
+ "sql1.example.com. A RRSIG NSEC"
+ ])
+ add_rrset(expected_rrset_list, name, rrclass,
+ isc.dns.RRType.RRSIG(), isc.dns.RRTTL(3600), None)
+
+ # rrs is an iterator, but also has direct get_next_rrset(), use
+ # the latter one here
+ rrset_to_check = rrs.get_next_rrset()
+ while (rrset_to_check != None):
+ self.assertTrue(check_for_rrset(expected_rrset_list,
+ rrset_to_check),
+ "Unexpected rrset returned by iterator:\n" +
+ rrset_to_check.to_text())
+ rrset_to_check = rrs.get_next_rrset()
+
+ # Now check there are none left
+ self.assertEqual(0, len(expected_rrset_list),
+ "RRset(s) not returned by iterator: " +
+ str([rrset.to_text() for rrset in expected_rrset_list ]
+ ))
+
+ # TODO should we catch this (iterating past end) and just return None
+ # instead of failing?
+ self.assertRaises(isc.datasrc.Error, rrs.get_next_rrset)
+
+ rrets = dsc.get_iterator(isc.dns.Name("example.com"))
+ # there are more than 80 RRs in this zone... let's just count them
+ # (already did a full check of the smaller zone above)
+ self.assertEqual(55, len(list(rrets)))
+ # TODO should we catch this (iterating past end) and just return None
+ # instead of failing?
+ self.assertRaises(isc.datasrc.Error, rrs.get_next_rrset)
+
+ self.assertRaises(TypeError, dsc.get_iterator, "asdf")
+
+ def test_construct(self):
+ # can't construct directly
+ self.assertRaises(TypeError, isc.datasrc.ZoneFinder)
+
+ def test_find(self):
+ dsc = isc.datasrc.DataSourceClient(READ_ZONE_DB_FILE)
+
+ result, finder = dsc.find_zone(isc.dns.Name("example.com"))
+ self.assertEqual(finder.SUCCESS, result)
+ self.assertEqual(isc.dns.RRClass.IN(), finder.get_class())
+ self.assertEqual("example.com.", finder.get_origin().to_text())
+
+ result, rrset = finder.find(isc.dns.Name("www.example.com"),
+ isc.dns.RRType.A(),
+ None,
+ finder.FIND_DEFAULT)
+ self.assertEqual(finder.SUCCESS, result)
+ self.assertEqual("www.example.com. 3600 IN A 192.0.2.1\n",
+ rrset.to_text())
+
+ result, rrset = finder.find(isc.dns.Name("www.sql1.example.com"),
+ isc.dns.RRType.A(),
+ None,
+ finder.FIND_DEFAULT)
+ self.assertEqual(finder.DELEGATION, result)
+ self.assertEqual("sql1.example.com. 3600 IN NS dns01.example.com.\n" +
+ "sql1.example.com. 3600 IN NS dns02.example.com.\n" +
+ "sql1.example.com. 3600 IN NS dns03.example.com.\n",
+ rrset.to_text())
+
+ result, rrset = finder.find(isc.dns.Name("doesnotexist.example.com"),
+ isc.dns.RRType.A(),
+ None,
+ finder.FIND_DEFAULT)
+ self.assertEqual(finder.NXDOMAIN, result)
+ self.assertEqual(None, rrset)
+
+ result, rrset = finder.find(isc.dns.Name("www.some.other.domain"),
+ isc.dns.RRType.A(),
+ None,
+ finder.FIND_DEFAULT)
+ self.assertEqual(finder.NXDOMAIN, result)
+ self.assertEqual(None, rrset)
+
+ result, rrset = finder.find(isc.dns.Name("www.example.com"),
+ isc.dns.RRType.TXT(),
+ None,
+ finder.FIND_DEFAULT)
+ self.assertEqual(finder.NXRRSET, result)
+ self.assertEqual(None, rrset)
+
+ result, rrset = finder.find(isc.dns.Name("cname-ext.example.com"),
+ isc.dns.RRType.A(),
+ None,
+ finder.FIND_DEFAULT)
+ self.assertEqual(finder.CNAME, result)
+ self.assertEqual(
+ "cname-ext.example.com. 3600 IN CNAME www.sql1.example.com.\n",
+ rrset.to_text())
+
+ self.assertRaises(TypeError, finder.find,
+ "foo",
+ isc.dns.RRType.A(),
+ None,
+ finder.FIND_DEFAULT)
+ self.assertRaises(TypeError, finder.find,
+ isc.dns.Name("cname-ext.example.com"),
+ "foo",
+ None,
+ finder.FIND_DEFAULT)
+ self.assertRaises(TypeError, finder.find,
+ isc.dns.Name("cname-ext.example.com"),
+ isc.dns.RRType.A(),
+ None,
+ "foo")
+
+
+class DataSrcUpdater(unittest.TestCase):
+
+ def setUp(self):
+ # Make a fresh copy of the writable database with all original content
+ shutil.copyfile(READ_ZONE_DB_FILE, WRITE_ZONE_DB_FILE)
+
+ def test_construct(self):
+ # can't construct directly
+ self.assertRaises(TypeError, isc.datasrc.ZoneUpdater)
+
+ def test_update_delete_commit(self):
+
+ dsc = isc.datasrc.DataSourceClient(WRITE_ZONE_DB_FILE)
+
+ # first make sure, through a separate finder, that some record exists
+ result, finder = dsc.find_zone(isc.dns.Name("example.com"))
+ self.assertEqual(finder.SUCCESS, result)
+ self.assertEqual(isc.dns.RRClass.IN(), finder.get_class())
+ self.assertEqual("example.com.", finder.get_origin().to_text())
+
+ result, rrset = finder.find(isc.dns.Name("www.example.com"),
+ isc.dns.RRType.A(),
+ None,
+ finder.FIND_DEFAULT)
+ self.assertEqual(finder.SUCCESS, result)
+ self.assertEqual("www.example.com. 3600 IN A 192.0.2.1\n",
+ rrset.to_text())
+
+ rrset_to_delete = rrset;
+
+ # can't delete rrset with associated sig. Abuse that to force an
+ # exception first, then remove the sig, then delete the record
+ updater = dsc.get_updater(isc.dns.Name("example.com"), True)
+ self.assertRaises(isc.datasrc.Error, updater.delete_rrset,
+ rrset_to_delete)
+
+ rrset_to_delete.remove_rrsig()
+
+ updater.delete_rrset(rrset_to_delete)
+
+ # The record should be gone in the updater, but not in the original
+ # finder (since we have not committed)
+ result, rrset = updater.find(isc.dns.Name("www.example.com"),
+ isc.dns.RRType.A(),
+ None,
+ finder.FIND_DEFAULT)
+ self.assertEqual(finder.NXDOMAIN, result)
+ self.assertEqual(None, rrset)
+
+ result, rrset = finder.find(isc.dns.Name("www.example.com"),
+ isc.dns.RRType.A(),
+ None,
+ finder.FIND_DEFAULT)
+ self.assertEqual(finder.SUCCESS, result)
+ self.assertEqual("www.example.com. 3600 IN A 192.0.2.1\n",
+ rrset.to_text())
+
+ updater.commit()
+ # second commit should raise exception
+ self.assertRaises(isc.datasrc.Error, updater.commit)
+
+ # the record should be gone now in the 'real' finder as well
+ result, rrset = finder.find(isc.dns.Name("www.example.com"),
+ isc.dns.RRType.A(),
+ None,
+ finder.FIND_DEFAULT)
+ self.assertEqual(finder.NXDOMAIN, result)
+ self.assertEqual(None, rrset)
+
+ # now add it again
+ updater = dsc.get_updater(isc.dns.Name("example.com"), True)
+ updater.add_rrset(rrset_to_delete)
+ updater.commit()
+
+ # second commit should throw
+ self.assertRaises(isc.datasrc.Error, updater.commit)
+
+ result, rrset = finder.find(isc.dns.Name("www.example.com"),
+ isc.dns.RRType.A(),
+ None,
+ finder.FIND_DEFAULT)
+ self.assertEqual(finder.SUCCESS, result)
+ self.assertEqual("www.example.com. 3600 IN A 192.0.2.1\n",
+ rrset.to_text())
+
+ def test_update_delete_abort(self):
+ dsc = isc.datasrc.DataSourceClient(WRITE_ZONE_DB_FILE)
+
+ # first make sure, through a separate finder, that some record exists
+ result, finder = dsc.find_zone(isc.dns.Name("example.com"))
+ self.assertEqual(finder.SUCCESS, result)
+ self.assertEqual(isc.dns.RRClass.IN(), finder.get_class())
+ self.assertEqual("example.com.", finder.get_origin().to_text())
+
+ result, rrset = finder.find(isc.dns.Name("www.example.com"),
+ isc.dns.RRType.A(),
+ None,
+ finder.FIND_DEFAULT)
+ self.assertEqual(finder.SUCCESS, result)
+ self.assertEqual("www.example.com. 3600 IN A 192.0.2.1\n",
+ rrset.to_text())
+
+ rrset_to_delete = rrset;
+
+ # can't delete rrset with associated sig. Abuse that to force an
+ # exception first, then remove the sig, then delete the record
+ updater = dsc.get_updater(isc.dns.Name("example.com"), True)
+ self.assertRaises(isc.datasrc.Error, updater.delete_rrset,
+ rrset_to_delete)
+
+ rrset_to_delete.remove_rrsig()
+
+ updater.delete_rrset(rrset_to_delete)
+
+ # The record should be gone in the updater, but not in the original
+ # finder (since we have not committed)
+ result, rrset = updater.find(isc.dns.Name("www.example.com"),
+ isc.dns.RRType.A(),
+ None,
+ finder.FIND_DEFAULT)
+ self.assertEqual(finder.NXDOMAIN, result)
+ self.assertEqual(None, rrset)
+
+ # destroy the updater, which should make it roll back
+ updater = None
+
+ # the record should still be available in the 'real' finder as well
+ result, rrset = finder.find(isc.dns.Name("www.example.com"),
+ isc.dns.RRType.A(),
+ None,
+ finder.FIND_DEFAULT)
+ self.assertEqual(finder.SUCCESS, result)
+ self.assertEqual("www.example.com. 3600 IN A 192.0.2.1\n",
+ rrset.to_text())
+
+
+if __name__ == "__main__":
+ isc.log.init("bind10")
+ unittest.main()
diff --git a/src/lib/python/isc/datasrc/tests/sqlite3_ds_test.py b/src/lib/python/isc/datasrc/tests/sqlite3_ds_test.py
index 707994f..10c61cf 100644
--- a/src/lib/python/isc/datasrc/tests/sqlite3_ds_test.py
+++ b/src/lib/python/isc/datasrc/tests/sqlite3_ds_test.py
@@ -23,8 +23,9 @@ TESTDATA_PATH = os.environ['TESTDATA_PATH'] + os.sep
TESTDATA_WRITE_PATH = os.environ['TESTDATA_WRITE_PATH'] + os.sep
READ_ZONE_DB_FILE = TESTDATA_PATH + "example.com.sqlite3"
-WRITE_ZONE_DB_FILE = TESTDATA_WRITE_PATH + "example.com.out.sqlite3"
BROKEN_DB_FILE = TESTDATA_PATH + "brokendb.sqlite3"
+WRITE_ZONE_DB_FILE = TESTDATA_WRITE_PATH + "example.com.out.sqlite3"
+NEW_DB_FILE = TESTDATA_WRITE_PATH + "new_db.sqlite3"
def example_reader():
my_zone = [
@@ -91,5 +92,52 @@ class TestSqlite3_ds(unittest.TestCase):
# and make sure lock does not stay
sqlite3_ds.load(WRITE_ZONE_DB_FILE, ".", example_reader)
+class NewDBFile(unittest.TestCase):
+ def tearDown(self):
+ # remove the created database after every test
+ if (os.path.exists(NEW_DB_FILE)):
+ os.remove(NEW_DB_FILE)
+
+ def setUp(self):
+ # remove the created database before every test too, just
+ # in case a test got aborted half-way, and cleanup didn't occur
+ if (os.path.exists(NEW_DB_FILE)):
+ os.remove(NEW_DB_FILE)
+
+ def test_new_db(self):
+ self.assertFalse(os.path.exists(NEW_DB_FILE))
+ sqlite3_ds.open(NEW_DB_FILE)
+ self.assertTrue(os.path.exists(NEW_DB_FILE))
+
+ def test_new_db_locked(self):
+ self.assertFalse(os.path.exists(NEW_DB_FILE))
+ con = sqlite3.connect(NEW_DB_FILE);
+ con.isolation_level = None
+ cur = con.cursor()
+ cur.execute("BEGIN IMMEDIATE TRANSACTION")
+
+ # open() should now fail, since the database is locked,
+ # and the call needs an exclusive lock
+ self.assertRaises(sqlite3.OperationalError,
+ sqlite3_ds.open, NEW_DB_FILE, 0.1)
+
+ con.rollback()
+ cur.close()
+ con.close()
+ self.assertTrue(os.path.exists(NEW_DB_FILE))
+
+ # now that we closed our connection, open() should work again
+ sqlite3_ds.open(NEW_DB_FILE)
+
+ # the database should now have been created, and opening it should
+ # not require an exclusive lock anymore, so we lock it again
+ con = sqlite3.connect(NEW_DB_FILE);
+ cur = con.cursor()
+ cur.execute("BEGIN IMMEDIATE TRANSACTION")
+ sqlite3_ds.open(NEW_DB_FILE, 0.1)
+ con.rollback()
+ cur.close()
+ con.close()
+
if __name__ == '__main__':
unittest.main()
diff --git a/src/lib/python/isc/datasrc/updater_inc.cc b/src/lib/python/isc/datasrc/updater_inc.cc
new file mode 100644
index 0000000..32715ec
--- /dev/null
+++ b/src/lib/python/isc/datasrc/updater_inc.cc
@@ -0,0 +1,181 @@
+namespace {
+
+const char* const ZoneUpdater_doc = "\
+The base class to make updates to a single zone.\n\
+\n\
+On construction, each derived class object will start a\n\
+\"transaction\" for making updates to a specific zone (this means a\n\
+constructor of a derived class would normally take parameters to\n\
+identify the zone to be updated). The underlying realization of a\n\
+\"transaction\" will differ for different derived classes; if it uses\n\
+a general purpose database as a backend, it will involve performing\n\
+some form of \"begin transaction\" statement for the database.\n\
+\n\
+Updates (adding or deleting RRs) are made via add_rrset() and\n\
+delete_rrset() methods. Until the commit() method is called the\n\
+changes are local to the updater object. For example, they won't be\n\
+visible via a ZoneFinder object, but only by the updater's own find()\n\
+method. The commit() completes the transaction and makes the changes\n\
+visible to others.\n\
+\n\
+This class does not provide an explicit \"rollback\" interface. If\n\
+something wrong or unexpected happens during the updates and the\n\
+caller wants to cancel the intermediate updates, the caller should\n\
+simply destroy the updater object without calling commit(). The\n\
+destructor is supposed to perform the \"rollback\" operation,\n\
+depending on the internal details of the derived class.\n\
+\n\
+This initial implementation provides a quite simple interface of\n\
+adding and deleting RRs (see the description of the related methods).\n\
+It may be revisited as we gain more experiences.\n\
+\n\
+";
+
+const char* const ZoneUpdater_addRRset_doc = "\
+add_rrset(rrset) -> No return value\n\
+\n\
+Add an RRset to a zone via the updater.\n\
+It performs a few basic checks:\n\
+- Whether the RR class is identical to that for the zone to be updated\n\
+- Whether the RRset is not empty, i.e., it has at least one RDATA\n\
+- Whether the RRset is not associated with an RRSIG, i.e., whether\n\
+ get_rrsig() on the RRset returns None.\n\
+\n\
+and otherwise does not check any oddity. For example, it doesn't check\n\
+whether the owner name of the specified RRset is a subdomain of the\n\
+zone's origin; it doesn't care whether or not there is already an\n\
+RRset of the same name and RR type in the zone, and if there is,\n\
+whether any of the existing RRs have duplicate RDATA with the added\n\
+ones. If these conditions matter the calling application must examine\n\
+the existing data beforehand using the ZoneFinder returned by\n\
+get_finder().\n\
+\n\
+The validation requirement on the associated RRSIG is temporary. If we\n\
+find it more reasonable and useful to allow adding a pair of RRset and\n\
+its RRSIG RRset as we gain experiences with the interface, we may\n\
+remove this restriction. Until then we explicitly check it to prevent\n\
+accidental misuse.\n\
+\n\
+Conceptually, on successful call to this method, the zone will have\n\
+the specified RRset, and if there is already an RRset of the same name\n\
+and RR type, these two sets will be \"merged\". \"Merged\" means that\n\
+a subsequent call to ZoneFinder.find() for the name and type will\n\
+result in success and the returned RRset will contain all previously\n\
+existing and newly added RDATAs with the TTL being the minimum of the\n\
+two RRsets. The underlying representation of the \"merged\" RRsets may\n\
+vary depending on the characteristic of the underlying data source.\n\
+For example, if it uses a general purpose database that stores each RR\n\
+of the same RRset separately, it may simply be a larger sets of RRs\n\
+based on both the existing and added RRsets; the TTLs of the RRs may\n\
+be different within the database, and there may even be duplicate RRs\n\
+in different database rows. As long as the RRset returned via\n\
+ZoneFinder.find() conforms to the concept of \"merge\", the actual\n\
+internal representation is up to the implementation.\n\
+\n\
+This method must not be called once commit() is performed. If it is\n\
+called after commit(), the implementation must throw an\n\
+isc.datasrc.Error exception.\n\
+\n\
+Todo As noted above we may have to revisit the design details as we\n\
+gain experiences:\n\
+\n\
+- we may want to check (and maybe reject) if there is already a\n\
+ duplicate RR (that has the same RDATA).\n\
+- we may want to check (and maybe reject) if there is already an RRset\n\
+ of the same name and RR type with different TTL\n\
+- we may even want to check if there is already any RRset of the same\n\
+ name and RR type.\n\
+- we may want to add an \"options\" parameter that can control the\n\
+ above points\n\
+- we may want to have this method return a value containing the\n\
+ information on whether there's a duplicate, etc.\n\
+\n\
+Exceptions:\n\
+ isc.datasrc.Error Called after commit(), RRset is invalid (see above),\n\
+ internal data source error, or wrapper error\n\
+\n\
+Parameters:\n\
+ rrset The RRset to be added\n\
+\n\
+";
+
+const char* const ZoneUpdater_deleteRRset_doc = "\
+delete_rrset(rrset) -> No return value\n\
+\n\
+Delete an RRset from a zone via the updater.\n\
+\n\
+Like add_rrset(), the detailed semantics and behavior of this method\n\
+may have to be revisited in a future version. The following are based\n\
+on the initial implementation decisions.\n\
+\n\
+- Existing RRs that don't match any of the specified RDATAs will\n\
+ remain in the zone.\n\
+- Any RRs of the specified RRset that doesn't exist in the zone will\n\
+ simply be ignored; the implementation of this method is not supposed\n\
+ to check that condition.\n\
+- The TTL of the RRset is ignored; matching is only performed by the\n\
+ owner name, RR type and RDATA\n\
+\n\
+Ignoring the TTL may not look sensible, but it's based on the\n\
+observation that it will result in more intuitive result, especially\n\
+when the underlying data source is a general purpose database. See\n\
+also the c++ documentation of DatabaseAccessor::DeleteRecordInZone()\n\
+on this point. It also matches the dynamic update protocol (RFC2136),\n\
+where TTLs are ignored when deleting RRs.\n\
+\n\
+This method performs a limited level of validation on the specified\n\
+RRset:\n\
+- Whether the RR class is identical to that for the zone to be updated\n\
+- Whether the RRset is not empty, i.e., it has at least one RDATA\n\
+- Whether the RRset is not associated with an RRSIG\n\
+\n\
+This method must not be called once commit() is performed. If it is\n\
+called after commit(), the implementation must throw an\n\
+isc.datasrc.Error exception.\n\
+\n\
+Todo: As noted above we may have to revisit the design details as we\n\
+gain experiences:\n\
+\n\
+- we may want to check (and maybe reject) if some or all of the RRs\n\
+ for the specified RRset don't exist in the zone\n\
+- we may want to allow an option to \"delete everything\" for\n\
+ specified name and/or specified name + RR type.\n\
+- as mentioned above, we may want to include the TTL in matching the\n\
+ deleted RRs\n\
+- we may want to add an \"options\" parameter that can control the\n\
+ above points\n\
+- we may want to have this method return a value containing the\n\
+ information on whether there's any RRs that are specified but don't\n\
+ exist, the number of actually deleted RRs, etc.\n\
+\n\
+Exceptions:\n\
+ isc.datasrc.Error Called after commit(), RRset is invalid (see above),\n\
+ internal data source error\n\
+ std.bad_alloc Resource allocation failure\n\
+\n\
+Parameters:\n\
+ rrset The RRset to be deleted\n\
+\n\
+";
+
+const char* const ZoneUpdater_commit_doc = "\
+commit() -> void\n\
+\n\
+Commit the updates made in the updater to the zone.\n\
+\n\
+This method completes the \"transaction\" started at the creation of\n\
+the updater. After successful completion of this method, the updates\n\
+will be visible outside the scope of the updater. The actual internal\n\
+behavior will differ for different derived classes. For a derived class\n\
+with a general purpose database as a backend, for example, this method\n\
+would perform a \"commit\" statement for the database.\n\
+\n\
+This operation can only be performed at most once. A duplicate call\n\
+must result in a isc.datasrc.Error exception.\n\
+\n\
+Exceptions:\n\
+ isc.datasrc.Error Duplicate call of the method, internal data source\n\
+ error, or wrapper error\n\
+\n\
+";
+} // unnamed namespace
diff --git a/src/lib/python/isc/datasrc/updater_python.cc b/src/lib/python/isc/datasrc/updater_python.cc
new file mode 100644
index 0000000..a9dc581
--- /dev/null
+++ b/src/lib/python/isc/datasrc/updater_python.cc
@@ -0,0 +1,318 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+// Enable this if you use s# variants with PyArg_ParseTuple(), see
+// http://docs.python.org/py3k/c-api/arg.html#strings-and-buffers
+//#define PY_SSIZE_T_CLEAN
+
+// Python.h needs to be placed at the head of the program file, see:
+// http://docs.python.org/py3k/extending/extending.html#a-simple-example
+#include <Python.h>
+
+#include <util/python/pycppwrapper_util.h>
+
+#include <datasrc/client.h>
+#include <datasrc/database.h>
+#include <datasrc/data_source.h>
+#include <datasrc/sqlite3_accessor.h>
+#include <datasrc/zone.h>
+
+#include <dns/python/name_python.h>
+#include <dns/python/rrset_python.h>
+#include <dns/python/rrclass_python.h>
+#include <dns/python/rrtype_python.h>
+
+#include "datasrc.h"
+#include "updater_python.h"
+
+#include "updater_inc.cc"
+#include "finder_inc.cc"
+
+using namespace std;
+using namespace isc::util::python;
+using namespace isc::dns::python;
+using namespace isc::datasrc;
+using namespace isc::datasrc::python;
+
+namespace isc_datasrc_internal {
+// See finder_python.cc
+PyObject* ZoneFinder_helper(ZoneFinder* finder, PyObject* args);
+}
+
+namespace {
+// The s_* Class simply covers one instantiation of the object
+class s_ZoneUpdater : public PyObject {
+public:
+ s_ZoneUpdater() : cppobj(ZoneUpdaterPtr()) {};
+ ZoneUpdaterPtr cppobj;
+};
+
+// Shortcut type which would be convenient for adding class variables safely.
+typedef CPPPyObjectContainer<s_ZoneUpdater, ZoneUpdater> ZoneUpdaterContainer;
+
+//
+// We declare the functions here, the definitions are below
+// the type definition of the object, since both can use the other
+//
+
+// General creation and destruction
+int
+ZoneUpdater_init(s_ZoneUpdater* self, PyObject* args) {
+ // can't be called directly
+ PyErr_SetString(PyExc_TypeError,
+ "ZoneUpdater cannot be constructed directly");
+
+ return (-1);
+}
+
+void
+ZoneUpdater_destroy(s_ZoneUpdater* const self) {
+ // cppobj is a shared ptr, but to make sure things are not destroyed in
+ // the wrong order, we reset it here.
+ self->cppobj.reset();
+ Py_TYPE(self)->tp_free(self);
+}
+
+PyObject*
+ZoneUpdater_addRRset(PyObject* po_self, PyObject* args) {
+ s_ZoneUpdater* const self = static_cast<s_ZoneUpdater*>(po_self);
+ PyObject* rrset_obj;
+ if (PyArg_ParseTuple(args, "O!", &rrset_type, &rrset_obj)) {
+ try {
+ self->cppobj->addRRset(PyRRset_ToRRset(rrset_obj));
+ Py_RETURN_NONE;
+ } catch (const DataSourceError& dse) {
+ PyErr_SetString(getDataSourceException("Error"), dse.what());
+ return (NULL);
+ } catch (const std::exception& exc) {
+ PyErr_SetString(getDataSourceException("Error"), exc.what());
+ return (NULL);
+ }
+ } else {
+ return (NULL);
+ }
+}
+
+PyObject*
+ZoneUpdater_deleteRRset(PyObject* po_self, PyObject* args) {
+ s_ZoneUpdater* const self = static_cast<s_ZoneUpdater*>(po_self);
+ PyObject* rrset_obj;
+ if (PyArg_ParseTuple(args, "O!", &rrset_type, &rrset_obj)) {
+ try {
+ self->cppobj->deleteRRset(PyRRset_ToRRset(rrset_obj));
+ Py_RETURN_NONE;
+ } catch (const DataSourceError& dse) {
+ PyErr_SetString(getDataSourceException("Error"), dse.what());
+ return (NULL);
+ } catch (const std::exception& exc) {
+ PyErr_SetString(getDataSourceException("Error"), exc.what());
+ return (NULL);
+ }
+ } else {
+ return (NULL);
+ }
+}
+
+PyObject*
+ZoneUpdater_commit(PyObject* po_self, PyObject*) {
+ s_ZoneUpdater* const self = static_cast<s_ZoneUpdater*>(po_self);
+ try {
+ self->cppobj->commit();
+ Py_RETURN_NONE;
+ } catch (const DataSourceError& dse) {
+ PyErr_SetString(getDataSourceException("Error"), dse.what());
+ return (NULL);
+ } catch (const std::exception& exc) {
+ PyErr_SetString(getDataSourceException("Error"), exc.what());
+ return (NULL);
+ }
+}
+
+PyObject*
+ZoneUpdater_getClass(PyObject* po_self, PyObject*) {
+ s_ZoneUpdater* self = static_cast<s_ZoneUpdater*>(po_self);
+ try {
+ return (createRRClassObject(self->cppobj->getFinder().getClass()));
+ } catch (const std::exception& exc) {
+ PyErr_SetString(getDataSourceException("Error"), exc.what());
+ return (NULL);
+ } catch (...) {
+ PyErr_SetString(getDataSourceException("Error"),
+ "Unexpected exception");
+ return (NULL);
+ }
+}
+
+PyObject*
+ZoneUpdater_getOrigin(PyObject* po_self, PyObject*) {
+ s_ZoneUpdater* self = static_cast<s_ZoneUpdater*>(po_self);
+ try {
+ return (createNameObject(self->cppobj->getFinder().getOrigin()));
+ } catch (const std::exception& exc) {
+ PyErr_SetString(getDataSourceException("Error"), exc.what());
+ return (NULL);
+ } catch (...) {
+ PyErr_SetString(getDataSourceException("Error"),
+ "Unexpected exception");
+ return (NULL);
+ }
+}
+
+PyObject*
+ZoneUpdater_find(PyObject* po_self, PyObject* args) {
+ s_ZoneUpdater* const self = static_cast<s_ZoneUpdater*>(po_self);
+ return (isc_datasrc_internal::ZoneFinder_helper(&self->cppobj->getFinder(),
+ args));
+}
+
+PyObject*
+AZoneUpdater_find(PyObject* po_self, PyObject* args) {
+ s_ZoneUpdater* const self = static_cast<s_ZoneUpdater*>(po_self);
+ PyObject *name;
+ PyObject *rrtype;
+ PyObject *target;
+ int options_int;
+ if (PyArg_ParseTuple(args, "O!O!OI", &name_type, &name,
+ &rrtype_type, &rrtype,
+ &target, &options_int)) {
+ try {
+ ZoneFinder::FindOptions options =
+ static_cast<ZoneFinder::FindOptions>(options_int);
+ ZoneFinder::FindResult find_result(
+ self->cppobj->getFinder().find(PyName_ToName(name),
+ PyRRType_ToRRType(rrtype),
+ NULL,
+ options
+ ));
+ ZoneFinder::Result r = find_result.code;
+ isc::dns::ConstRRsetPtr rrsp = find_result.rrset;
+ if (rrsp) {
+ // Use N instead of O so the refcount isn't increased twice
+ return Py_BuildValue("IN", r, createRRsetObject(*rrsp));
+ } else {
+ return Py_BuildValue("IO", r, Py_None);
+ }
+ } catch (const DataSourceError& dse) {
+ PyErr_SetString(getDataSourceException("Error"), dse.what());
+ return (NULL);
+ } catch (const std::exception& exc) {
+ PyErr_SetString(getDataSourceException("Error"), exc.what());
+ return (NULL);
+ } catch (...) {
+ PyErr_SetString(getDataSourceException("Error"),
+ "Unexpected exception");
+ return (NULL);
+ }
+ } else {
+ return (NULL);
+ }
+ return Py_BuildValue("I", 1);
+}
+
+
+// This list contains the actual set of functions we have in
+// python. Each entry has
+// 1. Python method name
+// 2. Our static function here
+// 3. Argument type
+// 4. Documentation
+PyMethodDef ZoneUpdater_methods[] = {
+ { "add_rrset", reinterpret_cast<PyCFunction>(ZoneUpdater_addRRset),
+ METH_VARARGS, ZoneUpdater_addRRset_doc },
+ { "delete_rrset", reinterpret_cast<PyCFunction>(ZoneUpdater_deleteRRset),
+ METH_VARARGS, ZoneUpdater_deleteRRset_doc },
+ { "commit", reinterpret_cast<PyCFunction>(ZoneUpdater_commit), METH_NOARGS,
+ ZoneUpdater_commit_doc },
+ // Instead of a getFinder, we implement the finder functionality directly
+ // This is because ZoneFinder is non-copyable, and we should not create
+ // a ZoneFinder object from a reference only (which is what is returned
+ // by getFinder(). Apart from that
+ { "get_origin", reinterpret_cast<PyCFunction>(ZoneUpdater_getOrigin),
+ METH_NOARGS, ZoneFinder_getOrigin_doc },
+ { "get_class", reinterpret_cast<PyCFunction>(ZoneUpdater_getClass),
+ METH_NOARGS, ZoneFinder_getClass_doc },
+ { "find", reinterpret_cast<PyCFunction>(ZoneUpdater_find), METH_VARARGS,
+ ZoneFinder_find_doc },
+ { NULL, NULL, 0, NULL }
+};
+
+} // end of unnamed namespace
+
+namespace isc {
+namespace datasrc {
+namespace python {
+PyTypeObject zoneupdater_type = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ "datasrc.ZoneUpdater",
+ sizeof(s_ZoneUpdater), // tp_basicsize
+ 0, // tp_itemsize
+ reinterpret_cast<destructor>(ZoneUpdater_destroy),// tp_dealloc
+ NULL, // tp_print
+ NULL, // tp_getattr
+ NULL, // tp_setattr
+ NULL, // tp_reserved
+ NULL, // tp_repr
+ NULL, // tp_as_number
+ NULL, // tp_as_sequence
+ NULL, // tp_as_mapping
+ NULL, // tp_hash
+ NULL, // tp_call
+ NULL, // tp_str
+ NULL, // tp_getattro
+ NULL, // tp_setattro
+ NULL, // tp_as_buffer
+ Py_TPFLAGS_DEFAULT, // tp_flags
+ ZoneUpdater_doc,
+ NULL, // tp_traverse
+ NULL, // tp_clear
+ NULL, // tp_richcompare
+ 0, // tp_weaklistoffset
+ NULL, // tp_iter
+ NULL, // tp_iternext
+ ZoneUpdater_methods, // tp_methods
+ NULL, // tp_members
+ NULL, // tp_getset
+ NULL, // tp_base
+ NULL, // tp_dict
+ NULL, // tp_descr_get
+ NULL, // tp_descr_set
+ 0, // tp_dictoffset
+ reinterpret_cast<initproc>(ZoneUpdater_init),// tp_init
+ NULL, // tp_alloc
+ PyType_GenericNew, // tp_new
+ NULL, // tp_free
+ NULL, // tp_is_gc
+ NULL, // tp_bases
+ NULL, // tp_mro
+ NULL, // tp_cache
+ NULL, // tp_subclasses
+ NULL, // tp_weaklist
+ NULL, // tp_del
+ 0 // tp_version_tag
+};
+
+PyObject*
+createZoneUpdaterObject(isc::datasrc::ZoneUpdaterPtr source) {
+ s_ZoneUpdater* py_zi = static_cast<s_ZoneUpdater*>(
+ zoneupdater_type.tp_alloc(&zoneupdater_type, 0));
+ if (py_zi != NULL) {
+ py_zi->cppobj = source;
+ }
+ return (py_zi);
+}
+
+} // namespace python
+} // namespace datasrc
+} // namespace isc
+
diff --git a/src/lib/python/isc/datasrc/updater_python.h b/src/lib/python/isc/datasrc/updater_python.h
new file mode 100644
index 0000000..3886aa3
--- /dev/null
+++ b/src/lib/python/isc/datasrc/updater_python.h
@@ -0,0 +1,39 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __PYTHON_DATASRC_UPDATER_H
+#define __PYTHON_DATASRC_UPDATER_H 1
+
+#include <Python.h>
+
+namespace isc {
+namespace datasrc {
+class DataSourceClient;
+
+namespace python {
+
+
+extern PyTypeObject zoneupdater_type;
+
+PyObject* createZoneUpdaterObject(isc::datasrc::ZoneUpdaterPtr source);
+
+
+} // namespace python
+} // namespace datasrc
+} // namespace isc
+#endif // __PYTHON_DATASRC_UPDATER_H
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/python/isc/dns/Makefile.am b/src/lib/python/isc/dns/Makefile.am
new file mode 100644
index 0000000..161c2a5
--- /dev/null
+++ b/src/lib/python/isc/dns/Makefile.am
@@ -0,0 +1,7 @@
+python_PYTHON = __init__.py
+
+CLEANDIRS = __pycache__
+
+clean-local:
+ rm -rf $(CLEANDIRS)
+
diff --git a/src/lib/python/isc/log/log.cc b/src/lib/python/isc/log/log.cc
index aa12664..5bb6a94 100644
--- a/src/lib/python/isc/log/log.cc
+++ b/src/lib/python/isc/log/log.cc
@@ -185,7 +185,7 @@ init(PyObject*, PyObject* args) {
Py_RETURN_NONE;
}
-// This initialization is for unit tests. It allows message settings to be
+// This initialization is for unit tests. It allows message settings to
// be determined by a set of B10_xxx environment variables. (See the
// description of initLogger() for more details.) The function has been named
// resetUnitTestRootLogger() here as being more descriptive and
diff --git a/src/lib/python/isc/log/tests/Makefile.am b/src/lib/python/isc/log/tests/Makefile.am
index 6bb67de..170eee6 100644
--- a/src/lib/python/isc/log/tests/Makefile.am
+++ b/src/lib/python/isc/log/tests/Makefile.am
@@ -1,28 +1,40 @@
PYCOVERAGE_RUN = @PYCOVERAGE_RUN@
-PYTESTS = log_test.py
-EXTRA_DIST = $(PYTESTS) log_console.py.in console.out check_output.sh
+PYTESTS_GEN = log_console.py
+PYTESTS_NOGEN = log_test.py
+noinst_SCRIPTS = $(PYTESTS_GEN)
+EXTRA_DIST = console.out check_output.sh $(PYTESTS_NOGEN)
# If necessary (rare cases), explicitly specify paths to dynamic libraries
# required by loadable python modules.
LIBRARY_PATH_PLACEHOLDER =
if SET_ENV_LIBRARY_PATH
-LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$$$(ENV_LIBRARY_PATH)
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
endif
# test using command-line arguments, so use check-local target instead of TESTS
+# We need to run the loop twice: once for the files in builddir, once for those in srcdir
check-local:
+ chmod +x $(abs_builddir)/log_console.py
$(LIBRARY_PATH_PLACEHOLDER) \
- env PYTHONPATH=$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python:$(abs_top_builddir)/src/lib/python/isc/log \
+ PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_builddir)/src/lib/python/isc/log \
$(abs_srcdir)/check_output.sh $(abs_builddir)/log_console.py $(abs_srcdir)/console.out
if ENABLE_PYTHON_COVERAGE
touch $(abs_top_srcdir)/.coverage
rm -f .coverage
${LN_S} $(abs_top_srcdir)/.coverage .coverage
endif
- for pytest in $(PYTESTS) ; do \
+ for pytest in $(PYTESTS_NOGEN) ; do \
echo Running test: $$pytest ; \
$(LIBRARY_PATH_PLACEHOLDER) \
- env PYTHONPATH=$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python:$(abs_top_builddir)/src/lib/python/isc/log:$(abs_top_builddir)/src/lib/log/python/.libs \
+ PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_builddir)/src/lib/python/isc/log:$(abs_top_builddir)/src/lib/log/python/.libs \
B10_TEST_PLUGIN_DIR=$(abs_top_srcdir)/src/bin/cfgmgr/plugins \
$(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
+ done ; \
+ for pytest in $(PYTESTS_GEN) ; do \
+ echo Running test: $$pytest ; \
+ chmod +x $(abs_builddir)/$$pytest ; \
+ $(LIBRARY_PATH_PLACEHOLDER) \
+ PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_builddir)/src/lib/python/isc/log:$(abs_top_builddir)/src/lib/log/python/.libs \
+ B10_TEST_PLUGIN_DIR=$(abs_top_srcdir)/src/bin/cfgmgr/plugins \
+ $(PYCOVERAGE_RUN) $(abs_builddir)/$$pytest || exit ; \
done
diff --git a/src/lib/python/isc/log_messages/Makefile.am b/src/lib/python/isc/log_messages/Makefile.am
new file mode 100644
index 0000000..30f8374
--- /dev/null
+++ b/src/lib/python/isc/log_messages/Makefile.am
@@ -0,0 +1,32 @@
+SUBDIRS = work
+
+EXTRA_DIST = __init__.py
+EXTRA_DIST += bind10_messages.py
+EXTRA_DIST += cmdctl_messages.py
+EXTRA_DIST += stats_messages.py
+EXTRA_DIST += stats_httpd_messages.py
+EXTRA_DIST += xfrin_messages.py
+EXTRA_DIST += xfrout_messages.py
+EXTRA_DIST += zonemgr_messages.py
+EXTRA_DIST += cfgmgr_messages.py
+EXTRA_DIST += config_messages.py
+EXTRA_DIST += notify_out_messages.py
+EXTRA_DIST += libxfrin_messages.py
+
+CLEANFILES = __init__.pyc
+CLEANFILES += bind10_messages.pyc
+CLEANFILES += cmdctl_messages.pyc
+CLEANFILES += stats_messages.pyc
+CLEANFILES += stats_httpd_messages.pyc
+CLEANFILES += xfrin_messages.pyc
+CLEANFILES += xfrout_messages.pyc
+CLEANFILES += zonemgr_messages.pyc
+CLEANFILES += cfgmgr_messages.pyc
+CLEANFILES += config_messages.pyc
+CLEANFILES += notify_out_messages.pyc
+CLEANFILES += libxfrin_messages.pyc
+
+CLEANDIRS = __pycache__
+
+clean-local:
+ rm -rf $(CLEANDIRS)
diff --git a/src/lib/python/isc/log_messages/README b/src/lib/python/isc/log_messages/README
new file mode 100644
index 0000000..c96f78c
--- /dev/null
+++ b/src/lib/python/isc/log_messages/README
@@ -0,0 +1,68 @@
+This is a placeholder package for logging messages of various modules
+in the form of python scripts. This package is expected to be installed
+somewhere like <top-install-dir>/python3.x/site-packages/isc/log_messages
+and each message script is expected to be imported as
+"isc.log_messages.some_module_messages".
+
+We also need to allow in-source test code to get access to the message
+scripts in the same manner. That's why the package is stored in the
+directory that shares the same trailing part as the install directory,
+i.e., isc/log_messages.
+
+Furthermore, we need to support a build mode using a separate build
+tree (such as in the case with 'make distcheck'). In that case if an
+application (via a test script) imports "isc.log_messages.xxx", it
+would try to import the module under the source tree, where the
+generated message script doesn't exist. So, in the source directory
+(i.e., here) we provide dummy scripts that subsequently import the
+same name of module under the "work" sub-package. The caller
+application is assumed to have <top_builddir>/src/lib/python/isc/log_messages
+in its module search path (this is done by including
+$(COMMON_PYTHON_PATH) in the PYTHONPATH environment variable),
+which ensures the right directory is chosen.
+
+A python module or program that defines its own log messages needs to
+make sure that the setup described above is implemented. It's a
+complicated process, but can generally be done by following a common
+pattern:
+
+1. Create the dummy script (see above) for the module and update
+ Makefile.am in this directory accordingly. See (and use)
+ a helper shell script named gen-forwarder.sh.
+2. Update Makefile.am of the module that defines the log message. The
+ following is a sample snippet for Makefile.am for a module named
+ "mymodule" (which is supposed to be generated from a file
+ "mymodule_messages.mes"). In many cases it should work simply by
+ replacing 'mymodule' with the actual module name.
+
+==================== begin Makefile.am additions ===================
+nodist_pylogmessage_PYTHON = $(PYTHON_LOGMSGPKG_DIR)/work/mymodule_messages.py
+pylogmessagedir = $(pyexecdir)/isc/log_messages/
+
+CLEANFILES = $(PYTHON_LOGMSGPKG_DIR)/work/mymodule_messages.py
+CLEANFILES += $(PYTHON_LOGMSGPKG_DIR)/work/mymodule_messages.pyc
+
+EXTRA_DIST = mymodule_messages.mes
+
+$(PYTHON_LOGMSGPKG_DIR)/work/mymodule_messages.py : mymodule_messages.mes
+ $(top_builddir)/src/lib/log/compiler/message \
+ -d $(PYTHON_LOGMSGPKG_DIR)/work -p $(srcdir)/mymodule_messages.mes
+
+# This rule ensures mymodule_messages.py is (re)generated as a result of
+# 'make'. If there's no other appropriate target, specify
+# mymodule_messages.py in BUILT_SOURCES.
+mymodule: <other source files> $(PYTHON_LOGMSGPKG_DIR)/work/mymodule_messages.py
+===================== end Makefile.am additions ====================
+
+Notes:
+- "nodist_" prefix is important. Without this, 'make distcheck' tries
+ to make _messages.py before actually starting the main build, which
+ would fail because the message compiler isn't built yet.
+- "pylogmessage" is a prefix for python scripts that define log
+ messages and are expected to be installed in the common isc/log_messages
+ directory. It's intentionally named differently from the common
+ "python" prefix (as in python_PYTHON), because the latter may be
+ used for other scripts in the same Makefile.am file.
+- $(PYTHON_LOGMSGPKG_DIR) should be set to point to this directory (or
+ the corresponding build directory if it's different) by the
+ configure script.
diff --git a/src/lib/python/isc/log_messages/__init__.py b/src/lib/python/isc/log_messages/__init__.py
new file mode 100644
index 0000000..d222b8c
--- /dev/null
+++ b/src/lib/python/isc/log_messages/__init__.py
@@ -0,0 +1,3 @@
+"""
+This is an in-source forwarder package redirecting to work/* scripts.
+"""
diff --git a/src/lib/python/isc/log_messages/bind10_messages.py b/src/lib/python/isc/log_messages/bind10_messages.py
new file mode 100644
index 0000000..68ce94c
--- /dev/null
+++ b/src/lib/python/isc/log_messages/bind10_messages.py
@@ -0,0 +1 @@
+from work.bind10_messages import *
diff --git a/src/lib/python/isc/log_messages/cfgmgr_messages.py b/src/lib/python/isc/log_messages/cfgmgr_messages.py
new file mode 100644
index 0000000..5557100
--- /dev/null
+++ b/src/lib/python/isc/log_messages/cfgmgr_messages.py
@@ -0,0 +1 @@
+from work.cfgmgr_messages import *
diff --git a/src/lib/python/isc/log_messages/cmdctl_messages.py b/src/lib/python/isc/log_messages/cmdctl_messages.py
new file mode 100644
index 0000000..7283d5a
--- /dev/null
+++ b/src/lib/python/isc/log_messages/cmdctl_messages.py
@@ -0,0 +1 @@
+from work.cmdctl_messages import *
diff --git a/src/lib/python/isc/log_messages/config_messages.py b/src/lib/python/isc/log_messages/config_messages.py
new file mode 100644
index 0000000..c557975
--- /dev/null
+++ b/src/lib/python/isc/log_messages/config_messages.py
@@ -0,0 +1 @@
+from work.config_messages import *
diff --git a/src/lib/python/isc/log_messages/gen-forwarder.sh b/src/lib/python/isc/log_messages/gen-forwarder.sh
new file mode 100755
index 0000000..84c2450
--- /dev/null
+++ b/src/lib/python/isc/log_messages/gen-forwarder.sh
@@ -0,0 +1,14 @@
+#!/bin/sh
+
+MODULE_NAME=$1
+if test -z $MODULE_NAME; then
+ echo 'Usage: gen-forwarder.sh module_name'
+ exit 1
+fi
+
+echo "from work.${MODULE_NAME}_messages import *" > ${MODULE_NAME}_messages.py
+echo "Forwarder python script is generated. Make sure to perform:"
+echo "git add ${MODULE_NAME}_messages.py"
+echo "and add the following to Makefile.am:"
+echo "EXTRA_DIST += ${MODULE_NAME}_messages.py"
+echo "CLEANFILES += ${MODULE_NAME}_messages.pyc"
diff --git a/src/lib/python/isc/log_messages/libxfrin_messages.py b/src/lib/python/isc/log_messages/libxfrin_messages.py
new file mode 100644
index 0000000..74da329
--- /dev/null
+++ b/src/lib/python/isc/log_messages/libxfrin_messages.py
@@ -0,0 +1 @@
+from work.libxfrin_messages import *
diff --git a/src/lib/python/isc/log_messages/notify_out_messages.py b/src/lib/python/isc/log_messages/notify_out_messages.py
new file mode 100644
index 0000000..6aa37ea
--- /dev/null
+++ b/src/lib/python/isc/log_messages/notify_out_messages.py
@@ -0,0 +1 @@
+from work.notify_out_messages import *
diff --git a/src/lib/python/isc/log_messages/stats_httpd_messages.py b/src/lib/python/isc/log_messages/stats_httpd_messages.py
new file mode 100644
index 0000000..7782c34
--- /dev/null
+++ b/src/lib/python/isc/log_messages/stats_httpd_messages.py
@@ -0,0 +1 @@
+from work.stats_httpd_messages import *
diff --git a/src/lib/python/isc/log_messages/stats_messages.py b/src/lib/python/isc/log_messages/stats_messages.py
new file mode 100644
index 0000000..1324cfc
--- /dev/null
+++ b/src/lib/python/isc/log_messages/stats_messages.py
@@ -0,0 +1 @@
+from work.stats_messages import *
diff --git a/src/lib/python/isc/log_messages/work/Makefile.am b/src/lib/python/isc/log_messages/work/Makefile.am
new file mode 100644
index 0000000..9bc5e0f
--- /dev/null
+++ b/src/lib/python/isc/log_messages/work/Makefile.am
@@ -0,0 +1,12 @@
+# .py is generated in the builddir by the configure script so that test
+# scripts can refer to it when a separate builddir is used.
+
+python_PYTHON = __init__.py
+
+pythondir = $(pyexecdir)/isc/log_messages/
+
+CLEANFILES = __init__.pyc
+CLEANDIRS = __pycache__
+
+clean-local:
+ rm -rf $(CLEANDIRS)
diff --git a/src/lib/python/isc/log_messages/work/__init__.py.in b/src/lib/python/isc/log_messages/work/__init__.py.in
new file mode 100644
index 0000000..991f10a
--- /dev/null
+++ b/src/lib/python/isc/log_messages/work/__init__.py.in
@@ -0,0 +1,3 @@
+"""
+This package is a placeholder for python scripts of log messages.
+"""
diff --git a/src/lib/python/isc/log_messages/xfrin_messages.py b/src/lib/python/isc/log_messages/xfrin_messages.py
new file mode 100644
index 0000000..b412519
--- /dev/null
+++ b/src/lib/python/isc/log_messages/xfrin_messages.py
@@ -0,0 +1 @@
+from work.xfrin_messages import *
diff --git a/src/lib/python/isc/log_messages/xfrout_messages.py b/src/lib/python/isc/log_messages/xfrout_messages.py
new file mode 100644
index 0000000..2093d5c
--- /dev/null
+++ b/src/lib/python/isc/log_messages/xfrout_messages.py
@@ -0,0 +1 @@
+from work.xfrout_messages import *
diff --git a/src/lib/python/isc/log_messages/zonemgr_messages.py b/src/lib/python/isc/log_messages/zonemgr_messages.py
new file mode 100644
index 0000000..b3afe9c
--- /dev/null
+++ b/src/lib/python/isc/log_messages/zonemgr_messages.py
@@ -0,0 +1 @@
+from work.zonemgr_messages import *
diff --git a/src/lib/python/isc/net/tests/Makefile.am b/src/lib/python/isc/net/tests/Makefile.am
index 3a04f17..dd94946 100644
--- a/src/lib/python/isc/net/tests/Makefile.am
+++ b/src/lib/python/isc/net/tests/Makefile.am
@@ -6,7 +6,7 @@ EXTRA_DIST = $(PYTESTS)
# required by loadable python modules.
LIBRARY_PATH_PLACEHOLDER =
if SET_ENV_LIBRARY_PATH
-LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/util/io/.libs:$$$(ENV_LIBRARY_PATH)
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
endif
# test using command-line arguments, so use check-local target instead of TESTS
@@ -19,6 +19,6 @@ endif
for pytest in $(PYTESTS) ; do \
echo Running test: $$pytest ; \
$(LIBRARY_PATH_PLACEHOLDER) \
- env PYTHONPATH=$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python:$(abs_top_builddir)/src/lib/dns/python/.libs \
+ PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_builddir)/src/lib/dns/python/.libs \
$(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
done
diff --git a/src/lib/python/isc/notify/Makefile.am b/src/lib/python/isc/notify/Makefile.am
index a23a1ff..c247ab8 100644
--- a/src/lib/python/isc/notify/Makefile.am
+++ b/src/lib/python/isc/notify/Makefile.am
@@ -1,21 +1,22 @@
SUBDIRS = . tests
python_PYTHON = __init__.py notify_out.py
-pyexec_DATA = $(top_builddir)/src/lib/python/notify_out_messages.py
-
pythondir = $(pyexecdir)/isc/notify
-$(top_builddir)/src/lib/python/notify_out_messages.py: notify_out_messages.mes
- $(top_builddir)/src/lib/log/compiler/message \
- -p -d $(top_builddir)/src/lib/python \
- $(top_srcdir)/src/lib/python/isc/notify/notify_out_messages.mes
+BUILT_SOURCES = $(PYTHON_LOGMSGPKG_DIR)/work/notify_out_messages.py
+nodist_pylogmessage_PYTHON = $(PYTHON_LOGMSGPKG_DIR)/work/notify_out_messages.py
+pylogmessagedir = $(pyexecdir)/isc/log_messages/
EXTRA_DIST = notify_out_messages.mes
-CLEANFILES = $(top_builddir)/src/lib/python/notify_out_messages.pyc
-CLEANFILES += $(top_builddir)/src/lib/python/notify_out_messages.py
+CLEANFILES = $(PYTHON_LOGMSGPKG_DIR)/work/notify_out_messages.py
+CLEANFILES += $(PYTHON_LOGMSGPKG_DIR)/work/notify_out_messages.pyc
CLEANDIRS = __pycache__
+$(PYTHON_LOGMSGPKG_DIR)/work/notify_out_messages.py : notify_out_messages.mes
+ $(top_builddir)/src/lib/log/compiler/message \
+ -d $(PYTHON_LOGMSGPKG_DIR)/work -p $(srcdir)/notify_out_messages.mes
+
clean-local:
rm -rf $(CLEANDIRS)
diff --git a/src/lib/python/isc/notify/notify_out.py b/src/lib/python/isc/notify/notify_out.py
index f1e02ca..6b91c87 100644
--- a/src/lib/python/isc/notify/notify_out.py
+++ b/src/lib/python/isc/notify/notify_out.py
@@ -23,7 +23,7 @@ import errno
from isc.datasrc import sqlite3_ds
from isc.net import addr
import isc
-from notify_out_messages import *
+from isc.log_messages.notify_out_messages import *
logger = isc.log.Logger("notify_out")
diff --git a/src/lib/python/isc/notify/tests/Makefile.am b/src/lib/python/isc/notify/tests/Makefile.am
index 1427d93..00c2eee 100644
--- a/src/lib/python/isc/notify/tests/Makefile.am
+++ b/src/lib/python/isc/notify/tests/Makefile.am
@@ -6,7 +6,7 @@ EXTRA_DIST = $(PYTESTS)
# required by loadable python modules.
LIBRARY_PATH_PLACEHOLDER =
if SET_ENV_LIBRARY_PATH
-LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$$$(ENV_LIBRARY_PATH)
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
endif
# test using command-line arguments, so use check-local target instead of TESTS
@@ -18,7 +18,7 @@ if ENABLE_PYTHON_COVERAGE
endif
for pytest in $(PYTESTS) ; do \
echo Running test: $$pytest ; \
- env PYTHONPATH=$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python:$(abs_top_builddir)/src/lib/dns/python/.libs \
+ PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_builddir)/src/lib/dns/python/.libs \
$(LIBRARY_PATH_PLACEHOLDER) \
$(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
done
diff --git a/src/lib/python/isc/util/tests/Makefile.am b/src/lib/python/isc/util/tests/Makefile.am
index c3d35c2..3b882b4 100644
--- a/src/lib/python/isc/util/tests/Makefile.am
+++ b/src/lib/python/isc/util/tests/Makefile.am
@@ -6,7 +6,7 @@ EXTRA_DIST = $(PYTESTS)
# required by loadable python modules.
LIBRARY_PATH_PLACEHOLDER =
if SET_ENV_LIBRARY_PATH
-LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$$$(ENV_LIBRARY_PATH)
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
endif
# test using command-line arguments, so use check-local target instead of TESTS
@@ -19,6 +19,6 @@ endif
for pytest in $(PYTESTS) ; do \
echo Running test: $$pytest ; \
$(LIBRARY_PATH_PLACEHOLDER) \
- env PYTHONPATH=$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python:$(abs_top_builddir)/src/lib/dns/python/.libs \
+ PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_builddir)/src/lib/dns/python/.libs \
$(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
done
diff --git a/src/lib/python/isc/xfrin/Makefile.am b/src/lib/python/isc/xfrin/Makefile.am
new file mode 100644
index 0000000..5804de6
--- /dev/null
+++ b/src/lib/python/isc/xfrin/Makefile.am
@@ -0,0 +1,23 @@
+SUBDIRS = . tests
+
+python_PYTHON = __init__.py diff.py
+BUILT_SOURCES = $(PYTHON_LOGMSGPKG_DIR)/work/libxfrin_messages.py
+nodist_pylogmessage_PYTHON = $(PYTHON_LOGMSGPKG_DIR)/work/libxfrin_messages.py
+pylogmessagedir = $(pyexecdir)/isc/log_messages/
+
+EXTRA_DIST = libxfrin_messages.mes
+
+CLEANFILES = $(PYTHON_LOGMSGPKG_DIR)/work/libxfrin_messages.py
+CLEANFILES += $(PYTHON_LOGMSGPKG_DIR)/work/libxfrin_messages.pyc
+
+# Define rule to build logging source files from message file
+$(PYTHON_LOGMSGPKG_DIR)/work/libxfrin_messages.py: libxfrin_messages.mes
+ $(top_builddir)/src/lib/log/compiler/message \
+ -d $(PYTHON_LOGMSGPKG_DIR)/work -p $(srcdir)/libxfrin_messages.mes
+
+pythondir = $(pyexecdir)/isc/xfrin
+
+CLEANDIRS = __pycache__
+
+clean-local:
+ rm -rf $(CLEANDIRS)
diff --git a/src/lib/python/isc/xfrin/__init__.py b/src/lib/python/isc/xfrin/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/src/lib/python/isc/xfrin/diff.py b/src/lib/python/isc/xfrin/diff.py
new file mode 100644
index 0000000..a2d9a7d
--- /dev/null
+++ b/src/lib/python/isc/xfrin/diff.py
@@ -0,0 +1,237 @@
+# Copyright (C) 2011 Internet Systems Consortium.
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""
+This helps the XFR in process with accumulating parts of diff and applying
+it to the datasource.
+
+The name of the module is not yet fully decided. We might want to move it
+under isc.datasrc or somewhere else, because we might want to reuse it with
+future DDNS process. But until then, it lives here.
+"""
+
+import isc.dns
+import isc.log
+from isc.log_messages.libxfrin_messages import *
+
+class NoSuchZone(Exception):
+ """
+ This is raised if a diff for a non-existent zone is being created.
+ """
+ pass
+
+"""
+This is the amount of changes we accumulate before calling Diff.apply
+automatically.
+
+The number 100 is just taken from BIND 9. We don't know the rationale
+for exactly this amount, but we think it is just some randomly chosen
+number.
+"""
+# If changing this, modify the tests accordingly as well.
+DIFF_APPLY_TRESHOLD = 100
+
+logger = isc.log.Logger('libxfrin')
+
+class Diff:
+ """
+ The class represents a diff against current state of datasource on
+ one zone. The usual way of working with it is creating it, then putting
+ bunch of changes in and committing at the end.
+
+ If you change your mind, you can just stop using the object without
+ really committing it. In that case no changes will happen in the data
+ source.
+
+ The class works as a kind of a buffer as well, it does not direct
+ the changes to underlying data source right away, but keeps them for
+ a while.
+ """
+ def __init__(self, ds_client, zone, replace=False):
+ """
+ Initializes the diff to a ready state. It checks the zone exists
+ in the datasource and if not, NoSuchZone is raised. This also creates
+ a transaction in the data source.
+
+ The ds_client is the datasource client containing the zone. Zone is
+ isc.dns.Name object representing the name of the zone (its apex).
+ If replace is true, the content of the whole zone is wiped out before
+ applying the diff.
+
+ You can also expect isc.datasrc.Error or isc.datasrc.NotImplemented
+ exceptions.
+ """
+ self.__updater = ds_client.get_updater(zone, replace)
+ if self.__updater is None:
+ # The no such zone case
+ raise NoSuchZone("Zone " + str(zone) +
+ " does not exist in the data source " +
+ str(ds_client))
+ self.__buffer = []
+
+ def __check_commited(self):
+ """
+ This checks if the diff is already committed or broken. If it is, it
+ raises ValueError. This check is for methods that need to work only on
+ yet uncommitted diffs.
+ """
+ if self.__updater is None:
+ raise ValueError("The diff is already commited or it has raised " +
+ "an exception, you come late")
+
+ def __data_common(self, rr, operation):
+ """
+ Schedules an operation with rr.
+
+ It does all the real work of add_data and delete_data, including
+ all checks.
+ """
+ self.__check_commited()
+ if rr.get_rdata_count() != 1:
+ raise ValueError('The rrset must contain exactly 1 Rdata, but ' +
+ 'it holds ' + str(rr.get_rdata_count()))
+ if rr.get_class() != self.__updater.get_class():
+ raise ValueError("The rrset's class " + str(rr.get_class()) +
+ " does not match updater's " +
+ str(self.__updater.get_class()))
+ self.__buffer.append((operation, rr))
+ if len(self.__buffer) >= DIFF_APPLY_TRESHOLD:
+ # Time to auto-apply, so the data don't accumulate too much
+ self.apply()
+
+ def add_data(self, rr):
+ """
+ Schedules addition of an RR into the zone in this diff.
+
+ The rr is of isc.dns.RRset type and it must contain only one RR.
+ If this is not the case or if the diff was already committed, this
+ raises the ValueError exception.
+
+ The rr class must match the one of the datasource client. If
+ it does not, ValueError is raised.
+ """
+ self.__data_common(rr, 'add')
+
+ def delete_data(self, rr):
+ """
+ Schedules deleting an RR from the zone in this diff.
+
+ The rr is of isc.dns.RRset type and it must contain only one RR.
+ If this is not the case or if the diff was already committed, this
+ raises the ValueError exception.
+
+ The rr class must match the one of the datasource client. If
+ it does not, ValueError is raised.
+ """
+ self.__data_common(rr, 'delete')
+
+ def compact(self):
+ """
+ Tries to compact the operations in buffer a little by putting some of
+ the operations together, forming RRsets with more than one RR.
+
+ This is called by apply before putting the data into datasource. You
+ may, but not have to, call this manually.
+
+ Currently it merges consecutive same operations on the same
+ domain/type. We could do more fancy things, like sorting by the domain
+ and do more merging, but such diffs should be rare in practice anyway,
+ so we don't bother and do it this simple way.
+ """
+ buf = []
+ for (op, rrset) in self.__buffer:
+ old = buf[-1][1] if len(buf) > 0 else None
+ if old is None or op != buf[-1][0] or \
+ rrset.get_name() != old.get_name() or \
+ rrset.get_type() != old.get_type():
+ buf.append((op, isc.dns.RRset(rrset.get_name(),
+ rrset.get_class(),
+ rrset.get_type(),
+ rrset.get_ttl())))
+ if rrset.get_ttl() != buf[-1][1].get_ttl():
+ logger.warn(LIBXFRIN_DIFFERENT_TTL, rrset.get_ttl(),
+ buf[-1][1].get_ttl())
+ for rdatum in rrset.get_rdata():
+ buf[-1][1].add_rdata(rdatum)
+ self.__buffer = buf
+
+ def apply(self):
+ """
+ Push the buffered changes inside this diff down into the data source.
+ This does not stop you from adding more changes later through this
+ diff and it does not close the datasource transaction, so the changes
+ will not be shown to others yet. It just means the internal memory
+ buffer is flushed.
+
+ This is called from time to time automatically, but you can call it
+ manually if you really want to.
+
+ This raises ValueError if the diff was already committed.
+
+ It also can raise isc.datasrc.Error. If that happens, you should stop
+ using this object and abort the modification.
+ """
+ self.__check_commited()
+ # First, compact the data
+ self.compact()
+ try:
+ # Then pass the data inside the data source
+ for (operation, rrset) in self.__buffer:
+ if operation == 'add':
+ self.__updater.add_rrset(rrset)
+ elif operation == 'delete':
+ self.__updater.delete_rrset(rrset)
+ else:
+ raise ValueError('Unknown operation ' + operation)
+ # As everything is already in, drop the buffer
+ except:
+ # If there's a problem, we can't continue.
+ self.__updater = None
+ raise
+
+ self.__buffer = []
+
+ def commit(self):
+ """
+ Writes all the changes into the data source and makes them visible.
+ This closes the diff, you may not use it any more. If you try to use
+ it, you'll get ValueError.
+
+ This might raise isc.datasrc.Error.
+ """
+ self.__check_commited()
+ # Push the data inside the data source
+ self.apply()
+ # Make sure they are visible.
+ try:
+ self.__updater.commit()
+ finally:
+ # Remove the updater. That will free some resources for one, but
+ # mark this object as already committed, so we can check
+
+ # We delete it even in case the commit failed, as that makes us
+ # unusable.
+ self.__updater = None
+
+ def get_buffer(self):
+ """
+ Returns the current buffer of changes not yet passed into the data
+ source. It is in a form like [('add', rrset), ('delete', rrset),
+ ('delete', rrset), ...].
+
+ Probably useful only for testing and introspection purposes. Don't
+ modify the list.
+ """
+ return self.__buffer
diff --git a/src/lib/python/isc/xfrin/libxfrin_messages.mes b/src/lib/python/isc/xfrin/libxfrin_messages.mes
new file mode 100644
index 0000000..be943c8
--- /dev/null
+++ b/src/lib/python/isc/xfrin/libxfrin_messages.mes
@@ -0,0 +1,21 @@
+# Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+# No namespace declaration - these constants go in the global namespace
+# of the libxfrin_messages python module.
+
+% LIBXFRIN_DIFFERENT_TTL multiple data with different TTLs (%1, %2) on %3/%4. Adjusting %2 -> %1.
+The xfrin module received an update containing multiple rdata changes for the
+same RRset. But the TTLs of these don't match each other. As we combine them
+together, the later one gets overwritten to the earlier one in the sequence.
diff --git a/src/lib/python/isc/xfrin/tests/Makefile.am b/src/lib/python/isc/xfrin/tests/Makefile.am
new file mode 100644
index 0000000..416d62b
--- /dev/null
+++ b/src/lib/python/isc/xfrin/tests/Makefile.am
@@ -0,0 +1,24 @@
+PYCOVERAGE_RUN = @PYCOVERAGE_RUN@
+PYTESTS = diff_tests.py
+EXTRA_DIST = $(PYTESTS)
+
+# If necessary (rare cases), explicitly specify paths to dynamic libraries
+# required by loadable python modules.
+LIBRARY_PATH_PLACEHOLDER =
+if SET_ENV_LIBRARY_PATH
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
+endif
+
+# test using command-line arguments, so use check-local target instead of TESTS
+check-local:
+if ENABLE_PYTHON_COVERAGE
+ touch $(abs_top_srcdir)/.coverage
+ rm -f .coverage
+ ${LN_S} $(abs_top_srcdir)/.coverage .coverage
+endif
+ for pytest in $(PYTESTS) ; do \
+ echo Running test: $$pytest ; \
+ $(LIBRARY_PATH_PLACEHOLDER) \
+ PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_builddir)/src/lib/dns/python/.libs \
+ $(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
+ done
diff --git a/src/lib/python/isc/xfrin/tests/diff_tests.py b/src/lib/python/isc/xfrin/tests/diff_tests.py
new file mode 100644
index 0000000..9fab890
--- /dev/null
+++ b/src/lib/python/isc/xfrin/tests/diff_tests.py
@@ -0,0 +1,446 @@
+# Copyright (C) 2011 Internet Systems Consortium.
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import isc.log
+import unittest
+from isc.dns import Name, RRset, RRClass, RRType, RRTTL, Rdata
+from isc.xfrin.diff import Diff, NoSuchZone
+
+class TestError(Exception):
+ """
+ Just to have something to be raised during the tests.
+ Not used outside.
+ """
+ pass
+
+class DiffTest(unittest.TestCase):
+ """
+ Tests for the isc.xfrin.diff.Diff class.
+
+ It also plays role of a data source and an updater, so it can manipulate
+ some test variables while being called.
+ """
+ def setUp(self):
+ """
+ This sets internal variables so we can see nothing was called yet.
+
+ It also creates some variables used in multiple tests.
+ """
+ # Track what was called already
+ self.__updater_requested = False
+ self.__compact_called = False
+ self.__data_operations = []
+ self.__apply_called = False
+ self.__commit_called = False
+ self.__broken_called = False
+ self.__warn_called = False
+ self.__should_replace = False
+ # Some common values
+ self.__rrclass = RRClass.IN()
+ self.__type = RRType.A()
+ self.__ttl = RRTTL(3600)
+ # And RRsets
+ # Create two valid rrsets
+ self.__rrset1 = RRset(Name('a.example.org.'), self.__rrclass,
+ self.__type, self.__ttl)
+ self.__rdata = Rdata(self.__type, self.__rrclass, '192.0.2.1')
+ self.__rrset1.add_rdata(self.__rdata)
+ self.__rrset2 = RRset(Name('b.example.org.'), self.__rrclass,
+ self.__type, self.__ttl)
+ self.__rrset2.add_rdata(self.__rdata)
+ # And two invalid
+ self.__rrset_empty = RRset(Name('empty.example.org.'), self.__rrclass,
+ self.__type, self.__ttl)
+ self.__rrset_multi = RRset(Name('multi.example.org.'), self.__rrclass,
+ self.__type, self.__ttl)
+ self.__rrset_multi.add_rdata(self.__rdata)
+ self.__rrset_multi.add_rdata(Rdata(self.__type, self.__rrclass,
+ '192.0.2.2'))
+
+ def __mock_compact(self):
+ """
+ This can be put into the diff to hook into its compact method and see
+ if it gets called.
+ """
+ self.__compact_called = True
+
+ def __mock_apply(self):
+ """
+ This can be put into the diff to hook into its apply method and see
+ if it gets called.
+ """
+ self.__apply_called = True
+
+ def __broken_operation(self, *args):
+ """
+ This can be used whenever an operation should fail. It raises TestError.
+ It should take whatever amount of parameters needed, so it can be put
+ quite anywhere.
+ """
+ self.__broken_called = True
+ raise TestError("Test error")
+
+ def warn(self, *args):
+ """
+ This is for checking the warn function was called, we replace the logger
+ in the tested module.
+ """
+ self.__warn_called = True
+
+ def commit(self):
+ """
+ This is part of pretending to be a zone updater. This notes the commit
+ was called.
+ """
+ self.__commit_called = True
+
+ def add_rrset(self, rrset):
+ """
+ This one is part of pretending to be a zone updater. It writes down
+ addition of an rrset was requested.
+ """
+ self.__data_operations.append(('add', rrset))
+
+ def delete_rrset(self, rrset):
+ """
+ This one is part of pretending to be a zone updater. It writes down
+ removal of an rrset was requested.
+ """
+ self.__data_operations.append(('delete', rrset))
+
+ def get_class(self):
+ """
+ This one is part of pretending to be a zone updater. It returns
+ the IN class.
+ """
+ return self.__rrclass
+
+ def get_updater(self, zone_name, replace):
+ """
+ This one pretends this is the data source client and serves
+ getting an updater.
+
+ If zone_name is 'none.example.org.', it returns None, otherwise
+ it returns self.
+ """
+ # The diff should not delete the old data.
+ self.assertEqual(self.__should_replace, replace)
+ self.__updater_requested = True
+ # Pretend this zone doesn't exist
+ if zone_name == Name('none.example.org.'):
+ return None
+ else:
+ return self
+
+ def test_create(self):
+ """
+ This tests the case when the diff is successfully created. It just
+ checks it does not throw and gets the updater.
+ """
+ diff = Diff(self, Name('example.org.'))
+ self.assertTrue(self.__updater_requested)
+ self.assertEqual([], diff.get_buffer())
+
+ def test_create_nonexist(self):
+ """
+ Try to create a diff on a zone that doesn't exist. This should
+ raise a correct exception.
+ """
+ self.assertRaises(NoSuchZone, Diff, self, Name('none.example.org.'))
+ self.assertTrue(self.__updater_requested)
+
+ def __data_common(self, diff, method, operation):
+ """
+ Common part of test for test_add and test_delete.
+ """
+ # Try putting there the bad data first
+ self.assertRaises(ValueError, method, self.__rrset_empty)
+ self.assertRaises(ValueError, method, self.__rrset_multi)
+ # They were not added
+ self.assertEqual([], diff.get_buffer())
+ # Put some proper data into the diff
+ method(self.__rrset1)
+ method(self.__rrset2)
+ dlist = [(operation, self.__rrset1), (operation, self.__rrset2)]
+ self.assertEqual(dlist, diff.get_buffer())
+ # Check the data are not destroyed by raising an exception because of
+ # bad data
+ self.assertRaises(ValueError, method, self.__rrset_empty)
+ self.assertEqual(dlist, diff.get_buffer())
+
+ def test_add(self):
+ """
+ Try to add few items into the diff and see they are stored in there.
+
+ Also try passing an rrset that has a different amount of RRs than 1.
+ """
+ diff = Diff(self, Name('example.org.'))
+ self.__data_common(diff, diff.add_data, 'add')
+
+ def test_delete(self):
+ """
+ Try scheduling removal of few items into the diff and see they are
+ stored in there.
+
+ Also try passing an rrset that has different amount of RRs than 1.
+ """
+ diff = Diff(self, Name('example.org.'))
+ self.__data_common(diff, diff.delete_data, 'delete')
+
+ def test_apply(self):
+ """
+ Schedule few additions and check the apply works by passing the
+ data into the updater.
+ """
+ # Prepare the diff
+ diff = Diff(self, Name('example.org.'))
+ diff.add_data(self.__rrset1)
+ diff.delete_data(self.__rrset2)
+ dlist = [('add', self.__rrset1), ('delete', self.__rrset2)]
+ self.assertEqual(dlist, diff.get_buffer())
+ # Do the apply, hook the compact method
+ diff.compact = self.__mock_compact
+ diff.apply()
+ # It should call the compact
+ self.assertTrue(self.__compact_called)
+ # And pass the data. Our local history of what happened is the same
+ # format, so we can check the same way
+ self.assertEqual(dlist, self.__data_operations)
+ # And the buffer in diff should become empty, as everything
+ # got inside.
+ self.assertEqual([], diff.get_buffer())
+
+ def test_commit(self):
+ """
+ If we call a commit, it should first apply whatever changes are
+ left (we hook into that instead of checking the effect) and then
+ the commit on the updater should have been called.
+
+ Then we check it raises value error for whatever operation we try.
+ """
+ diff = Diff(self, Name('example.org.'))
+ diff.add_data(self.__rrset1)
+ orig_apply = diff.apply
+ diff.apply = self.__mock_apply
+ diff.commit()
+ self.assertTrue(self.__apply_called)
+ self.assertTrue(self.__commit_called)
+ # The data should be handled by apply which we replaced.
+ self.assertEqual([], self.__data_operations)
+ # Now check all range of other methods raise ValueError
+ self.assertRaises(ValueError, diff.commit)
+ self.assertRaises(ValueError, diff.add_data, self.__rrset2)
+ self.assertRaises(ValueError, diff.delete_data, self.__rrset1)
+ diff.apply = orig_apply
+ self.assertRaises(ValueError, diff.apply)
+ # This one does not state it should raise, so check it doesn't
+ # But it is NOP in this situation anyway
+ diff.compact()
+
+ def test_autoapply(self):
+ """
+ Test the apply is called all by itself after 100 tasks are added.
+ """
+ diff = Diff(self, Name('example.org.'))
+ # A method to check the apply is called _after_ the 100th element
+ # is added. We don't use it anywhere else, so we define it locally
+ # as lambda function
+ def check():
+ self.assertEqual(100, len(diff.get_buffer()))
+ self.__mock_apply()
+ orig_apply = diff.apply
+ diff.apply = check
+ # If we put 99, nothing happens yet
+ for i in range(0, 99):
+ diff.add_data(self.__rrset1)
+ expected = [('add', self.__rrset1)] * 99
+ self.assertEqual(expected, diff.get_buffer())
+ self.assertFalse(self.__apply_called)
+ # Now we push the 100th and it should call the apply method
+ # This will _not_ flush the data yet, as we replaced the method.
+ # It, however, would in the real life.
+ diff.add_data(self.__rrset1)
+ # Now the apply method (which is replaced by our check) should
+ # have been called. If it wasn't, this is false. If it was, but
+ # still with 99 elements, the check would complain
+ self.assertTrue(self.__apply_called)
+ # Reset the buffer by calling the original apply.
+ orig_apply()
+ self.assertEqual([], diff.get_buffer())
+ # Similar with delete
+ self.__apply_called = False
+ for i in range(0, 99):
+ diff.delete_data(self.__rrset2)
+ expected = [('delete', self.__rrset2)] * 99
+ self.assertEqual(expected, diff.get_buffer())
+ self.assertFalse(self.__apply_called)
+ diff.delete_data(self.__rrset2)
+ self.assertTrue(self.__apply_called)
+
+ def test_compact(self):
+ """
+ Test the compaction works as expected, e.g. it compacts only consecutive
+ changes of the same operation and on the same domain/type.
+
+ The test case checks that it does merge them, but also puts some
+ different operations "in the middle", changes the type and name and
+ places the same kind of change further away of each other to see they
+ are not merged in that case.
+ """
+ diff = Diff(self, Name('example.org.'))
+ # Check we can do a compact on empty data, it shouldn't break
+ diff.compact()
+ self.assertEqual([], diff.get_buffer())
+ # This data is the way it should look like after the compact
+ # ('operation', 'domain.prefix', 'type', ['rdata', 'rdata'])
+ # The notes say why each of the consecutive ones can't be merged
+ data = [
+ ('add', 'a', 'A', ['192.0.2.1', '192.0.2.2']),
+ # Different type.
+ ('add', 'a', 'AAAA', ['2001:db8::1', '2001:db8::2']),
+ # Different operation
+ ('delete', 'a', 'AAAA', ['2001:db8::3']),
+ # Different domain
+ ('delete', 'b', 'AAAA', ['2001:db8::4']),
+ # This does not get merged with the first, even if logically
+ # possible. We just don't do this.
+ ('add', 'a', 'A', ['192.0.2.3'])
+ ]
+ # Now, fill the data into the diff, in a "flat" way, one by one
+ for (op, nprefix, rrtype, rdata) in data:
+ name = Name(nprefix + '.example.org.')
+ rrtype_obj = RRType(rrtype)
+ for rdatum in rdata:
+ rrset = RRset(name, self.__rrclass, rrtype_obj, self.__ttl)
+ rrset.add_rdata(Rdata(rrtype_obj, self.__rrclass, rdatum))
+ if op == 'add':
+ diff.add_data(rrset)
+ else:
+ diff.delete_data(rrset)
+ # Compact it
+ diff.compact()
+ # Now check they got compacted. They should be in the same order as
+ # pushed inside. So it should be the same as data modulo being in
+ # the rrsets and isc.dns objects.
+ def check():
+ buf = diff.get_buffer()
+ self.assertEqual(len(data), len(buf))
+ for (expected, received) in zip(data, buf):
+ (eop, ename, etype, edata) = expected
+ (rop, rrrset) = received
+ self.assertEqual(eop, rop)
+ ename_obj = Name(ename + '.example.org.')
+ self.assertEqual(ename_obj, rrrset.get_name())
+ # We check on names to make sure they are printed nicely
+ self.assertEqual(etype, str(rrrset.get_type()))
+ rdata = rrrset.get_rdata()
+ self.assertEqual(len(edata), len(rdata))
+ # It should also preserve the order
+ for (edatum, rdatum) in zip(edata, rdata):
+ self.assertEqual(edatum, str(rdatum))
+ check()
+ # Try another compact does nothing, but survives
+ diff.compact()
+ check()
+
+ def test_wrong_class(self):
+ """
+ Test a wrong class of rrset is rejected.
+ """
+ diff = Diff(self, Name('example.org.'))
+ rrset = RRset(Name('a.example.org.'), RRClass.CH(), RRType.NS(),
+ self.__ttl)
+ rrset.add_rdata(Rdata(RRType.NS(), RRClass.CH(), 'ns.example.org.'))
+ self.assertRaises(ValueError, diff.add_data, rrset)
+ self.assertRaises(ValueError, diff.delete_data, rrset)
+
+ def __do_raise_test(self):
+ """
+ Do a raise test. Expects that one of the operations is exchanged for
+ broken version.
+ """
+ diff = Diff(self, Name('example.org.'))
+ diff.add_data(self.__rrset1)
+ diff.delete_data(self.__rrset2)
+ self.assertRaises(TestError, diff.commit)
+ self.assertTrue(self.__broken_called)
+ self.assertRaises(ValueError, diff.add_data, self.__rrset1)
+ self.assertRaises(ValueError, diff.delete_data, self.__rrset2)
+ self.assertRaises(ValueError, diff.commit)
+ self.assertRaises(ValueError, diff.apply)
+
+ def test_raise_add(self):
+ """
+ Test the exception from add_rrset is propagated and the diff can't be
+ used afterwards.
+ """
+ self.add_rrset = self.__broken_operation
+ self.__do_raise_test()
+
+ def test_raise_delete(self):
+ """
+ Test the exception from delete_rrset is propagated and the diff can't be
+ used afterwards.
+ """
+ self.delete_rrset = self.__broken_operation
+ self.__do_raise_test()
+
+ def test_raise_commit(self):
+ """
+ Test the exception from updater's commit gets propagated and it can't be
+ used afterwards.
+ """
+ self.commit = self.__broken_operation
+ self.__do_raise_test()
+
+ def test_ttl(self):
+ """
+ Test the TTL handling. A warn function should have been called if they
+ differ, but that's all, it should not crash or raise.
+ """
+ orig_logger = isc.xfrin.diff.logger
+ try:
+ isc.xfrin.diff.logger = self
+ diff = Diff(self, Name('example.org.'))
+ diff.add_data(self.__rrset1)
+ rrset2 = RRset(Name('a.example.org.'), self.__rrclass,
+ self.__type, RRTTL(120))
+ rrset2.add_rdata(Rdata(self.__type, self.__rrclass, '192.10.2.2'))
+ diff.add_data(rrset2)
+ rrset2 = RRset(Name('a.example.org.'), self.__rrclass,
+ self.__type, RRTTL(6000))
+ rrset2.add_rdata(Rdata(self.__type, self.__rrclass, '192.10.2.3'))
+ diff.add_data(rrset2)
+ # They should get compacted together and complain.
+ diff.compact()
+ self.assertEqual(1, len(diff.get_buffer()))
+ # The TTL stays on the first value, no matter if smaller or bigger
+ # ones come later.
+ self.assertEqual(self.__ttl, diff.get_buffer()[0][1].get_ttl())
+ self.assertTrue(self.__warn_called)
+ finally:
+ isc.xfrin.diff.logger = orig_logger
+
+ def test_relpace(self):
+ """
+ Test that when we want to replace the whole zone, it is propagated.
+ """
+ self.__should_replace = True
+ diff = Diff(self, "example.org.", True)
+ self.assertTrue(self.__updater_requested)
+
+if __name__ == "__main__":
+ isc.log.init("bind10")
+ unittest.main()
diff --git a/src/lib/util/python/gen_wiredata.py.in b/src/lib/util/python/gen_wiredata.py.in
index 8e1f079..8bd2b3c 100755
--- a/src/lib/util/python/gen_wiredata.py.in
+++ b/src/lib/util/python/gen_wiredata.py.in
@@ -822,6 +822,49 @@ class RP(RR):
f.write('# MAILBOX=%s TEXT=%s\n' % (self.mailbox, self.text))
f.write('%s %s\n' % (mailbox_wire, text_wire))
+class MINFO(RR):
+ '''Implements rendering MINFO RDATA in the test data format.
+
+ Configurable parameters are as follows (see the description of the
+ same name of attribute for the default value):
+ - rmailbox (string): The rmailbox field.
+ - emailbox (string): The emailbox field.
+ These strings must be interpreted as a valid domain name.
+ '''
+ rmailbox = 'rmailbox.example.com'
+ emailbox = 'emailbox.example.com'
+ def dump(self, f):
+ rmailbox_wire = encode_name(self.rmailbox)
+ emailbox_wire = encode_name(self.emailbox)
+ if self.rdlen is None:
+ self.rdlen = (len(rmailbox_wire) + len(emailbox_wire)) / 2
+ else:
+ self.rdlen = int(self.rdlen)
+ self.dump_header(f, self.rdlen)
+ f.write('# RMAILBOX=%s EMAILBOX=%s\n' % (self.rmailbox, self.emailbox))
+ f.write('%s %s\n' % (rmailbox_wire, emailbox_wire))
+
+class AFSDB(RR):
+ '''Implements rendering AFSDB RDATA in the test data format.
+
+ Configurable parameters are as follows (see the description of the
+ same name of attribute for the default value):
+ - subtype (16 bit int): The subtype field.
+ - server (string): The server field.
+ The string must be interpreted as a valid domain name.
+ '''
+ subtype = 1
+ server = 'afsdb.example.com'
+ def dump(self, f):
+ server_wire = encode_name(self.server)
+ if self.rdlen is None:
+ self.rdlen = 2 + len(server_wire) / 2
+ else:
+ self.rdlen = int(self.rdlen)
+ self.dump_header(f, self.rdlen)
+ f.write('# SUBTYPE=%d SERVER=%s\n' % (self.subtype, self.server))
+ f.write('%04x %s\n' % (self.subtype, server_wire))
+
class NSECBASE(RR):
'''Implements rendering NSEC/NSEC3 type bitmaps commonly used for
these RRs. The NSEC and NSEC3 classes will be inherited from this
diff --git a/src/lib/util/python/pycppwrapper_util.h b/src/lib/util/python/pycppwrapper_util.h
index 3f396e2..462e715 100644
--- a/src/lib/util/python/pycppwrapper_util.h
+++ b/src/lib/util/python/pycppwrapper_util.h
@@ -293,7 +293,7 @@ protected:
/// \c PyObject_New() to the caller.
template <typename PYSTRUCT, typename CPPCLASS>
struct CPPPyObjectContainer : public PyObjectContainer {
- CPPPyObjectContainer(PYSTRUCT* obj) : PyObjectContainer(obj) {}
+ explicit CPPPyObjectContainer(PYSTRUCT* obj) : PyObjectContainer(obj) {}
// This method associates a C++ object with the corresponding python
// object enclosed in this class.
diff --git a/src/lib/util/python/wrapper_template.cc b/src/lib/util/python/wrapper_template.cc
index a703731..426ced5 100644
--- a/src/lib/util/python/wrapper_template.cc
+++ b/src/lib/util/python/wrapper_template.cc
@@ -299,8 +299,8 @@ initModulePart_ at CPPCLASS@(PyObject* mod) {
PyObject*
create at CPPCLASS@Object(const @CPPCLASS@& source) {
- @CPPCLASS at Container container =
- PyObject_New(s_ at CPPCLASS@, &@cppclass at _type);
+ @CPPCLASS at Container container(PyObject_New(s_ at CPPCLASS@,
+ &@cppclass at _type));
container.set(new @CPPCLASS@(source));
return (container.release());
}
diff --git a/src/lib/util/python/wrapper_template.h b/src/lib/util/python/wrapper_template.h
index d68a658..be701e1 100644
--- a/src/lib/util/python/wrapper_template.h
+++ b/src/lib/util/python/wrapper_template.h
@@ -37,15 +37,15 @@ bool initModulePart_ at CPPCLASS@(PyObject* mod);
// Note: this utility function works only when @CPPCLASS@ is a copy
// constructable.
// And, it would only be useful when python binding needs to create this
-// object frequently. Otherwise, it would (or should) probably better to
+// object frequently. Otherwise, it would (or should) probably be better to
// remove the declaration and definition of this function.
//
-/// This is A simple shortcut to create a python @CPPCLASS@ object (in the
+/// This is a simple shortcut to create a python @CPPCLASS@ object (in the
/// form of a pointer to PyObject) with minimal exception safety.
/// On success, it returns a valid pointer to PyObject with a reference
/// counter of 1; if something goes wrong it throws an exception (it never
/// returns a NULL pointer).
-/// This function is expected to be called with in a try block
+/// This function is expected to be called within a try block
/// followed by necessary setup for python exception.
PyObject* create at CPPCLASS@Object(const @CPPCLASS@& source);
diff --git a/tests/system/bindctl/tests.sh b/tests/system/bindctl/tests.sh
index 6923c41..49ef0f1 100755
--- a/tests/system/bindctl/tests.sh
+++ b/tests/system/bindctl/tests.sh
@@ -24,6 +24,10 @@ SYSTEMTESTTOP=..
status=0
n=0
+# TODO: consider consistency with statistics definition in auth.spec
+auth_queries_tcp="\<queries\.tcp\>"
+auth_queries_udp="\<queries\.udp\>"
+
echo "I:Checking b10-auth is working by default ($n)"
$DIG +norec @10.53.0.1 -p 53210 ns.example.com. A >dig.out.$n || status=1
# perform a simple check on the output (digcomp would be too much for this)
@@ -40,8 +44,8 @@ echo 'Stats show
--csv-file-dir=$BINDCTL_CSV_DIR > bindctl.out.$n || status=1
# the server should have received 1 UDP and 1 TCP queries (TCP query was
# sent from the server startup script)
-grep "\"auth.queries.tcp\": 1," bindctl.out.$n > /dev/null || status=1
-grep "\"auth.queries.udp\": 1," bindctl.out.$n > /dev/null || status=1
+grep $auth_queries_tcp".*\<1\>" bindctl.out.$n > /dev/null || status=1
+grep $auth_queries_udp".*\<1\>" bindctl.out.$n > /dev/null || status=1
if [ $status != 0 ]; then echo "I:failed"; fi
n=`expr $n + 1`
@@ -73,8 +77,8 @@ echo 'Stats show
' | $RUN_BINDCTL \
--csv-file-dir=$BINDCTL_CSV_DIR > bindctl.out.$n || status=1
# The statistics counters should have been reset while stop/start.
-grep "\"auth.queries.tcp\": 0," bindctl.out.$n > /dev/null || status=1
-grep "\"auth.queries.udp\": 1," bindctl.out.$n > /dev/null || status=1
+grep $auth_queries_tcp".*\<0\>" bindctl.out.$n > /dev/null || status=1
+grep $auth_queries_udp".*\<1\>" bindctl.out.$n > /dev/null || status=1
if [ $status != 0 ]; then echo "I:failed"; fi
n=`expr $n + 1`
@@ -97,8 +101,8 @@ echo 'Stats show
' | $RUN_BINDCTL \
--csv-file-dir=$BINDCTL_CSV_DIR > bindctl.out.$n || status=1
# The statistics counters shouldn't be reset due to hot-swapping datasource.
-grep "\"auth.queries.tcp\": 0," bindctl.out.$n > /dev/null || status=1
-grep "\"auth.queries.udp\": 2," bindctl.out.$n > /dev/null || status=1
+grep $auth_queries_tcp".*\<0\>" bindctl.out.$n > /dev/null || status=1
+grep $auth_queries_udp".*\<2\>" bindctl.out.$n > /dev/null || status=1
if [ $status != 0 ]; then echo "I:failed"; fi
n=`expr $n + 1`
diff --git a/tests/system/cleanall.sh b/tests/system/cleanall.sh
index 17c3d4a..d23d103 100755
--- a/tests/system/cleanall.sh
+++ b/tests/system/cleanall.sh
@@ -27,7 +27,10 @@ find . -type f \( \
status=0
-for d in `find . -type d -maxdepth 1 -mindepth 1 -print`
+for d in ./.* ./*
do
+ case $d in ./.|./..) continue ;; esac
+ test -d $d || continue
+
test ! -f $d/clean.sh || ( cd $d && sh clean.sh )
done
More information about the bind10-changes
mailing list